1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
40 /*********************************************************************
42 *********************************************************************/
/* Driver version string; reported via the device description in probe. */
43 char ixl_driver_version[] = "1.2.8";
45 /*********************************************************************
48 * Used by probe to select devices to load on
49 * Last field stores an index into ixl_strings
50 * Last entry must be all 0s
52 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
53 *********************************************************************/
/*
 * PCI ID match table walked by ixl_probe().  A subvendor/subdevice of 0
 * acts as a wildcard; the scan stops at the all-zero sentinel entry
 * (field layout documented in the header comment above).
 */
55 static ixl_vendor_info_t ixl_vendor_info_array[] =
57 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
58 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
59 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
60 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
61 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
62 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
63 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
65 /* required last entry */
69 /*********************************************************************
70 * Table of branding strings
71 *********************************************************************/
/*
 * Branding strings; indexed by the last ("String Index") field of
 * ixl_vendor_info_array entries when building the device description.
 */
73 static char *ixl_strings[] = {
74 "Intel(R) Ethernet Connection XL710 Driver"
78 /*********************************************************************
80 *********************************************************************/
81 static int ixl_probe(device_t);
82 static int ixl_attach(device_t);
83 static int ixl_detach(device_t);
84 static int ixl_shutdown(device_t);
85 static int ixl_get_hw_capabilities(struct ixl_pf *);
86 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
87 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
88 static void ixl_init(void *);
89 static void ixl_init_locked(struct ixl_pf *);
90 static void ixl_stop(struct ixl_pf *);
91 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
92 static int ixl_media_change(struct ifnet *);
93 static void ixl_update_link_status(struct ixl_pf *);
94 static int ixl_allocate_pci_resources(struct ixl_pf *);
95 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
96 static int ixl_setup_stations(struct ixl_pf *);
97 static int ixl_setup_vsi(struct ixl_vsi *);
98 static int ixl_initialize_vsi(struct ixl_vsi *);
99 static int ixl_assign_vsi_msix(struct ixl_pf *);
100 static int ixl_assign_vsi_legacy(struct ixl_pf *);
101 static int ixl_init_msix(struct ixl_pf *);
102 static void ixl_configure_msix(struct ixl_pf *);
103 static void ixl_configure_itr(struct ixl_pf *);
104 static void ixl_configure_legacy(struct ixl_pf *);
105 static void ixl_free_pci_resources(struct ixl_pf *);
106 static void ixl_local_timer(void *);
107 static int ixl_setup_interface(device_t, struct ixl_vsi *);
108 static bool ixl_config_link(struct i40e_hw *);
109 static void ixl_config_rss(struct ixl_vsi *);
110 static void ixl_set_queue_rx_itr(struct ixl_queue *);
111 static void ixl_set_queue_tx_itr(struct ixl_queue *);
112 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
114 static void ixl_enable_rings(struct ixl_vsi *);
115 static void ixl_disable_rings(struct ixl_vsi *);
116 static void ixl_enable_intr(struct ixl_vsi *);
117 static void ixl_disable_intr(struct ixl_vsi *);
119 static void ixl_enable_adminq(struct i40e_hw *);
120 static void ixl_disable_adminq(struct i40e_hw *);
121 static void ixl_enable_queue(struct i40e_hw *, int);
122 static void ixl_disable_queue(struct i40e_hw *, int);
123 static void ixl_enable_legacy(struct i40e_hw *);
124 static void ixl_disable_legacy(struct i40e_hw *);
126 static void ixl_set_promisc(struct ixl_vsi *);
127 static void ixl_add_multi(struct ixl_vsi *);
128 static void ixl_del_multi(struct ixl_vsi *);
129 static void ixl_register_vlan(void *, struct ifnet *, u16);
130 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
131 static void ixl_setup_vlan_filters(struct ixl_vsi *);
133 static void ixl_init_filters(struct ixl_vsi *);
134 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
135 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
136 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
137 static void ixl_del_hw_filters(struct ixl_vsi *, int);
138 static struct ixl_mac_filter *
139 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
140 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
142 /* Sysctl debug interface */
143 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
144 static void ixl_print_debug_info(struct ixl_pf *);
146 /* The MSI/X Interrupt handlers */
147 static void ixl_intr(void *);
148 static void ixl_msix_que(void *);
149 static void ixl_msix_adminq(void *);
150 static void ixl_handle_mdd_event(struct ixl_pf *);
152 /* Deferred interrupt tasklets */
153 static void ixl_do_adminq(void *, int);
155 /* Sysctl handlers */
156 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
157 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
158 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
159 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
162 static void ixl_add_hw_stats(struct ixl_pf *);
163 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
164 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
165 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
166 struct sysctl_oid_list *,
167 struct i40e_eth_stats *);
168 static void ixl_update_stats_counters(struct ixl_pf *);
169 static void ixl_update_eth_stats(struct ixl_vsi *);
170 static void ixl_pf_reset_stats(struct ixl_pf *);
171 static void ixl_vsi_reset_stats(struct ixl_vsi *);
172 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
174 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
178 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
179 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
180 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
181 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
182 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
183 static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
186 /*********************************************************************
187 * FreeBSD Device Interface Entry Points
188 *********************************************************************/
/* Newbus method table: maps the generic device entry points to ixl handlers. */
190 static device_method_t ixl_methods[] = {
191 /* Device interface */
192 DEVMETHOD(device_probe, ixl_probe),
193 DEVMETHOD(device_attach, ixl_attach),
194 DEVMETHOD(device_detach, ixl_detach),
195 DEVMETHOD(device_shutdown, ixl_shutdown),
/* Driver descriptor; sizeof(struct ixl_pf) tells newbus how much
 * per-device softc storage to allocate. */
199 static driver_t ixl_driver = {
200 "ixl", ixl_methods, sizeof(struct ixl_pf),
/* Register the driver on the PCI bus and declare module dependencies. */
203 devclass_t ixl_devclass;
204 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
206 MODULE_DEPEND(ixl, pci, 1, 1, 1);
207 MODULE_DEPEND(ixl, ether, 1, 1, 1);
210 ** Global reset mutex
/* Shared across all ixl instances; initialized once in ixl_probe() and
 * used in ixl_attach() to serialize the one-time CORE reset. */
212 static struct mtx ixl_reset_mtx;
215 ** TUNEABLE PARAMETERS:
/* Parent sysctl node: hw.ixl.* */
218 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
219 "IXL driver parameters");
222 * MSIX should be the default for best performance,
223 * but this allows it to be forced off for testing.
225 static int ixl_enable_msix = 1;
226 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
227 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
228 "Enable MSI-X interrupts");
231 ** Number of descriptors per ring:
232 ** - TX and RX are the same size
234 static int ixl_ringsz = DEFAULT_RING;
235 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
236 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
237 &ixl_ringsz, 0, "Descriptor Ring Size");
240 ** This can be set manually, if left as 0 the
241 ** number of queues will be calculated based
242 ** on cpus and msix vectors available.
244 int ixl_max_queues = 0;
245 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
246 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
247 &ixl_max_queues, 0, "Number of Queues");
250 ** Controls for Interrupt Throttling
251 ** - true/false for dynamic adjustment
252 ** - default values for static ITR
254 int ixl_dynamic_rx_itr = 0;
255 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
256 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
257 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
259 int ixl_dynamic_tx_itr = 0;
260 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
261 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
262 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults used when dynamic adjustment is off. */
264 int ixl_rx_itr = IXL_ITR_8K;
265 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
266 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
267 &ixl_rx_itr, 0, "RX Interrupt Rate");
269 int ixl_tx_itr = IXL_ITR_4K;
270 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
271 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
272 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* Flow director enable (tunable only; no sysctl is attached here). */
275 static int ixl_enable_fdir = 1;
276 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
277 /* Rate at which we sample */
278 int ixl_atr_rate = 20;
279 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
/* presumably maps flow-control mode values to display names; initializer
 * not visible here — TODO confirm against ixl_set_flowcntl() */
283 static char *ixl_fc_string[6] = {
293 /*********************************************************************
294 * Device identification routine
296 * ixl_probe determines if the driver should be loaded on
297 * the hardware based on PCI vendor/device id of the device.
299 * return BUS_PROBE_DEFAULT on success, positive on failure
300 *********************************************************************/
303 ixl_probe(device_t dev)
305 ixl_vendor_info_t *ent;
307 u16 pci_vendor_id, pci_device_id;
308 u16 pci_subvendor_id, pci_subdevice_id;
309 char device_name[256];
/* One-shot guard: the global reset mutex is initialized by whichever
 * instance probes first. */
310 static bool lock_init = FALSE;
312 INIT_DEBUGOUT("ixl_probe: begin");
/* Fast-path reject anything that is not an Intel device. */
314 pci_vendor_id = pci_get_vendor(dev);
315 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
318 pci_device_id = pci_get_device(dev);
319 pci_subvendor_id = pci_get_subvendor(dev);
320 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the ID table; subvendor/subdevice 0 in an entry acts as a wildcard. */
322 ent = ixl_vendor_info_array;
323 while (ent->vendor_id != 0) {
324 if ((pci_vendor_id == ent->vendor_id) &&
325 (pci_device_id == ent->device_id) &&
327 ((pci_subvendor_id == ent->subvendor_id) ||
328 (ent->subvendor_id == 0)) &&
330 ((pci_subdevice_id == ent->subdevice_id) ||
331 (ent->subdevice_id == 0))) {
/* NOTE(review): sprintf into a fixed 256-byte buffer; snprintf
 * would be safer if the branding strings ever grow. */
332 sprintf(device_name, "%s, Version - %s",
333 ixl_strings[ent->index],
335 device_set_desc_copy(dev, device_name);
336 /* One shot mutex init */
337 if (lock_init == FALSE) {
339 mtx_init(&ixl_reset_mtx,
341 "IXL RESET Lock", MTX_DEF);
343 return (BUS_PROBE_DEFAULT);
350 /*********************************************************************
351 * Device initialization routine
353 * The attach entry point is called when the driver is being loaded.
354 * This routine identifies the type of hardware, allocates all resources
355 * and initializes the hardware.
357 * return 0 on success, positive on failure
358 *********************************************************************/
361 ixl_attach(device_t dev)
369 INIT_DEBUGOUT("ixl_attach: begin");
371 /* Allocate, clear, and link in our primary soft structure */
372 pf = device_get_softc(dev);
373 pf->dev = pf->osdep.dev = dev;
377 ** Note this assumes we have a single embedded VSI,
378 ** this could be enhanced later to allocate multiple
384 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
386 /* Set up the timer callout */
387 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Per-device sysctl tree: flow control, speed, firmware and debug nodes,
 * all handled by the ixl_sysctl_* / ixl_set_* procs declared above. */
390 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
391 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
392 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
393 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
395 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
396 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
397 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
398 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
400 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
401 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
402 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
403 pf, 0, ixl_current_speed, "A", "Current Port Speed");
405 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
408 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
/* NOTE(review): these expose the *global* ITR tunables per device,
 * so writing through one device's node affects all instances. */
410 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
411 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
412 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
413 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
415 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
416 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
417 OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
418 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
420 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
421 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
422 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
423 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
425 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
426 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
427 OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
428 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
431 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
432 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
433 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
434 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
436 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
437 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
438 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
439 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
441 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
442 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
443 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
444 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
446 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
449 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
451 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
454 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
456 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458 OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
459 pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
462 /* Save off the PCI information */
463 hw->vendor_id = pci_get_vendor(dev);
464 hw->device_id = pci_get_device(dev);
465 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
466 hw->subsystem_vendor_id =
467 pci_read_config(dev, PCIR_SUBVEND_0, 2);
468 hw->subsystem_device_id =
469 pci_read_config(dev, PCIR_SUBDEV_0, 2);
471 hw->bus.device = pci_get_slot(dev);
472 hw->bus.func = pci_get_function(dev);
474 /* Do PCI setup - map BAR0, etc */
475 if (ixl_allocate_pci_resources(pf)) {
476 device_printf(dev, "Allocation of PCI resources failed\n");
481 /* Create for initial debugging use */
482 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
483 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
484 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
485 ixl_debug_info, "I", "Debug Information");
488 /* Establish a clean starting point */
490 error = i40e_pf_reset(hw);
492 device_printf(dev,"PF reset failure %x\n", error);
497 /* For now always do an initial CORE reset on first device */
/* Tracks bus/device pairs already reset; serialized by ixl_reset_mtx
 * so only the first attach of a given device triggers the CORE reset. */
499 static int ixl_dev_count;
500 static int ixl_dev_track[32];
502 int i, found = FALSE;
503 u16 bus = pci_get_bus(dev);
505 mtx_lock(&ixl_reset_mtx);
/* Key is (PCI bus << 8) | slot, uniquely identifying the physical device. */
506 my_dev = (bus << 8) | hw->bus.device;
508 for (i = 0; i < ixl_dev_count; i++) {
509 if (ixl_dev_track[i] == my_dev)
516 ixl_dev_track[ixl_dev_count] = my_dev;
519 INIT_DEBUGOUT("Initial CORE RESET\n");
520 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/* Poll the global reset status register until the device leaves
 * the reset state. */
525 reg = rd32(hw, I40E_GLGEN_RSTAT);
526 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
/* Clear the PF admin transmit queue registers post-reset. */
531 wr32(hw, I40E_PF_ATQLEN, 0);
532 wr32(hw, I40E_PF_ATQBAL, 0);
533 wr32(hw, I40E_PF_ATQBAH, 0);
534 i40e_clear_pxe_mode(hw);
536 mtx_unlock(&ixl_reset_mtx);
539 /* Set admin queue parameters */
540 hw->aq.num_arq_entries = IXL_AQ_LEN;
541 hw->aq.num_asq_entries = IXL_AQ_LEN;
542 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
543 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
545 /* Initialize the shared code */
546 error = i40e_init_shared_code(hw);
548 device_printf(dev,"Unable to initialize the shared code\n");
553 /* Set up the admin queue */
554 error = i40e_init_adminq(hw);
556 device_printf(dev, "The driver for the device stopped "
557 "because the NVM image is newer than expected.\n"
558 "You must install the most recent version of "
559 " the network driver.\n");
562 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
/* Warn (but continue) when the firmware API version does not match
 * what this driver was built against. */
564 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
565 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
566 device_printf(dev, "The driver for the device detected "
567 "a newer version of the NVM image than expected.\n"
568 "Please install the most recent version of the network driver.\n");
569 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
570 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
571 device_printf(dev, "The driver for the device detected "
572 "an older version of the NVM image than expected.\n"
573 "Please update the NVM image.\n");
576 i40e_clear_pxe_mode(hw);
578 /* Get capabilities from the device */
579 error = ixl_get_hw_capabilities(pf);
581 device_printf(dev, "HW capabilities failure!\n");
585 /* Set up host memory cache */
586 error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
588 device_printf(dev, "init_lan_hmc failed: %d\n", error);
592 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
594 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
598 /* Disable LLDP from the firmware */
599 i40e_aq_stop_lldp(hw, TRUE, NULL);
/* Read and validate the MAC address from hardware. */
601 i40e_get_mac_addr(hw, hw->mac.addr);
602 error = i40e_validate_mac_addr(hw->mac.addr);
604 device_printf(dev, "validate_mac_addr failed: %d\n", error);
607 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
608 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
610 /* Set up VSI and queues */
611 if (ixl_setup_stations(pf) != 0) {
612 device_printf(dev, "setup stations failed!\n");
617 /* Initialize mac filter list for VSI */
618 SLIST_INIT(&vsi->ftl);
620 /* Set up interrupt routing here */
622 error = ixl_assign_vsi_msix(pf);
624 error = ixl_assign_vsi_legacy(pf);
/* Kick off link autonegotiation before querying link state. */
629 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
631 device_printf(dev, "link restart failed, aq_err=%d\n",
632 pf->hw.aq.asq_last_status);
635 /* Determine link state */
636 vsi->link_up = ixl_config_link(hw);
638 /* Report if Unqualified modules are found */
639 if ((vsi->link_up == FALSE) &&
640 (pf->hw.phy.link_info.link_info &
641 I40E_AQ_MEDIA_AVAILABLE) &&
642 (!(pf->hw.phy.link_info.an_info &
643 I40E_AQ_QUALIFIED_MODULE)))
644 device_printf(dev, "Link failed because "
645 "an unqualified module was detected\n");
647 /* Setup OS specific network interface */
648 if (ixl_setup_interface(dev, vsi) != 0) {
649 device_printf(dev, "interface setup failed!\n");
654 /* Get the bus configuration and set the shared code */
655 bus = ixl_get_bus_info(hw, dev);
656 i40e_set_pci_config_data(hw, bus);
658 /* Initialize statistics */
659 ixl_pf_reset_stats(pf);
660 ixl_update_stats_counters(pf);
661 ixl_add_hw_stats(pf);
663 /* Reset port's advertised speeds */
/* Non-40G parts get all three advertised-speed bits (0x7) by default. */
664 if (!i40e_is_40G_device(hw->device_id)) {
665 pf->advertised_speed = 0x7;
666 ixl_set_advertised_speeds(pf, 0x7);
669 /* Register for VLAN events */
670 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
671 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
672 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
673 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
676 INIT_DEBUGOUT("ixl_attach: end");
/* Error unwind: tear down, in reverse order, everything set up above. */
680 if (vsi->ifp != NULL)
683 i40e_shutdown_lan_hmc(hw);
685 i40e_shutdown_adminq(hw);
687 ixl_free_pci_resources(pf);
689 IXL_PF_LOCK_DESTROY(pf);
693 /*********************************************************************
694 * Device removal routine
696 * The detach entry point is called when the driver is being removed.
697 * This routine stops the adapter and deallocates all the resources
698 * that were allocated for driver operation.
700 * return 0 on success, positive on failure
701 *********************************************************************/
704 ixl_detach(device_t dev)
706 struct ixl_pf *pf = device_get_softc(dev);
707 struct i40e_hw *hw = &pf->hw;
708 struct ixl_vsi *vsi = &pf->vsi;
709 struct ixl_queue *que = vsi->queues;
712 INIT_DEBUGOUT("ixl_detach: begin");
714 /* Make sure VLANS are not using driver */
/* Refuse to detach while a vlan trunk still references this interface. */
715 if (vsi->ifp->if_vlantrunk != NULL) {
716 device_printf(dev,"Vlan in use, detach first\n");
/* Drain and free the per-queue taskqueues before touching hardware. */
724 for (int i = 0; i < vsi->num_queues; i++, que++) {
726 taskqueue_drain(que->tq, &que->task);
727 taskqueue_drain(que->tq, &que->tx_task);
728 taskqueue_free(que->tq);
732 /* Shutdown LAN HMC */
733 status = i40e_shutdown_lan_hmc(hw);
736 "Shutdown LAN HMC failed with code %d\n", status);
738 /* Shutdown admin queue */
739 status = i40e_shutdown_adminq(hw);
742 "Shutdown Admin queue failed with code %d\n", status);
744 /* Unregister VLAN events */
745 if (vsi->vlan_attach != NULL)
746 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
747 if (vsi->vlan_detach != NULL)
748 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
/* Detach from the network stack and stop the local timer. */
750 ether_ifdetach(vsi->ifp);
751 callout_drain(&pf->timer);
754 ixl_free_pci_resources(pf);
755 bus_generic_detach(dev);
758 IXL_PF_LOCK_DESTROY(pf);
762 /*********************************************************************
764 * Shutdown entry point
766 **********************************************************************/
769 ixl_shutdown(device_t dev)
/* System-shutdown hook; operates on this device's softc. */
771 struct ixl_pf *pf = device_get_softc(dev);
779 /*********************************************************************
781 * Get the hardware capabilities
783 **********************************************************************/
786 ixl_get_hw_capabilities(struct ixl_pf *pf)
788 struct i40e_aqc_list_capabilities_element_resp *buf;
789 struct i40e_hw *hw = &pf->hw;
790 device_t dev = pf->dev;
/* Initial guess: room for 40 capability elements; retried larger below
 * if the firmware reports ENOMEM. */
795 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
797 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
798 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
799 device_printf(dev, "Unable to allocate cap memory\n");
803 /* This populates the hw struct */
804 error = i40e_aq_discover_capabilities(hw, buf, len,
805 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
807 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
809 /* retry once with a larger buffer */
813 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
814 device_printf(dev, "capability discovery failed: %d\n",
815 pf->hw.aq.asq_last_status);
819 /* Capture this PF's starting queue pair */
820 pf->qbase = hw->func_caps.base_queue;
/* Debug dump of the discovered function capabilities. */
823 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
824 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
825 hw->pf_id, hw->func_caps.num_vfs,
826 hw->func_caps.num_msix_vectors,
827 hw->func_caps.num_msix_vectors_vf,
828 hw->func_caps.fd_filters_guaranteed,
829 hw->func_caps.fd_filters_best_effort,
830 hw->func_caps.num_tx_qp,
831 hw->func_caps.num_rx_qp,
832 hw->func_caps.base_queue);
/*
 * Resolve TXCSUM/TSO interdependencies for a SIOCSIFCAP request: TSO
 * requires TX checksum offload, so toggling one may force the other.
 * The IXL_FLAGS_KEEP_TSO{4,6} vsi flags remember that TSO was disabled
 * only as a side effect, so it can be restored when TXCSUM returns.
 * 'mask' is the XOR of requested and current if_capenable bits.
 */
838 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
840 device_t dev = vsi->dev;
842 /* Enable/disable TXCSUM/TSO4 */
/* Case: both currently off. */
843 if (!(ifp->if_capenable & IFCAP_TXCSUM)
844 && !(ifp->if_capenable & IFCAP_TSO4)) {
845 if (mask & IFCAP_TXCSUM) {
846 ifp->if_capenable |= IFCAP_TXCSUM;
847 /* enable TXCSUM, restore TSO if previously enabled */
848 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
849 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
850 ifp->if_capenable |= IFCAP_TSO4;
853 else if (mask & IFCAP_TSO4) {
854 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
855 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
857 "TSO4 requires txcsum, enabling both...\n");
/* Case: TXCSUM on, TSO4 off — simple toggles are safe. */
859 } else if((ifp->if_capenable & IFCAP_TXCSUM)
860 && !(ifp->if_capenable & IFCAP_TSO4)) {
861 if (mask & IFCAP_TXCSUM)
862 ifp->if_capenable &= ~IFCAP_TXCSUM;
863 else if (mask & IFCAP_TSO4)
864 ifp->if_capenable |= IFCAP_TSO4;
/* Case: both on — dropping TXCSUM must also drop TSO4. */
865 } else if((ifp->if_capenable & IFCAP_TXCSUM)
866 && (ifp->if_capenable & IFCAP_TSO4)) {
867 if (mask & IFCAP_TXCSUM) {
868 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
869 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
871 "TSO4 requires txcsum, disabling both...\n");
872 } else if (mask & IFCAP_TSO4)
873 ifp->if_capenable &= ~IFCAP_TSO4;
876 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* IPv6 mirror of the logic above. */
877 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
878 && !(ifp->if_capenable & IFCAP_TSO6)) {
879 if (mask & IFCAP_TXCSUM_IPV6) {
880 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
881 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
882 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
883 ifp->if_capenable |= IFCAP_TSO6;
885 } else if (mask & IFCAP_TSO6) {
886 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
887 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
889 "TSO6 requires txcsum6, enabling both...\n");
891 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
892 && !(ifp->if_capenable & IFCAP_TSO6)) {
893 if (mask & IFCAP_TXCSUM_IPV6)
894 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
895 else if (mask & IFCAP_TSO6)
896 ifp->if_capenable |= IFCAP_TSO6;
897 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
898 && (ifp->if_capenable & IFCAP_TSO6)) {
899 if (mask & IFCAP_TXCSUM_IPV6) {
900 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
901 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
903 "TSO6 requires txcsum6, disabling both...\n");
904 } else if (mask & IFCAP_TSO6)
905 ifp->if_capenable &= ~IFCAP_TSO6;
909 /*********************************************************************
912 * ixl_ioctl is called when the user wants to configure the
915 * return 0 on success, positive on failure
916 **********************************************************************/
919 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
921 struct ixl_vsi *vsi = ifp->if_softc;
922 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
923 struct ifreq *ifr = (struct ifreq *) data;
924 #if defined(INET) || defined(INET6)
925 struct ifaddr *ifa = (struct ifaddr *)data;
926 bool avoid_reset = FALSE;
/* Address-family checks that decide whether a full reinit can be skipped. */
934 if (ifa->ifa_addr->sa_family == AF_INET)
938 if (ifa->ifa_addr->sa_family == AF_INET6)
941 #if defined(INET) || defined(INET6)
943 ** Calling init results in link renegotiation,
944 ** so we avoid doing it when possible.
947 ifp->if_flags |= IFF_UP;
948 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
951 if (!(ifp->if_flags & IFF_NOARP))
952 arp_ifinit(ifp, ifa);
955 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the hardware maximum, then record the
 * new per-VSI max frame size (MTU plus Ethernet/VLAN overhead). */
959 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
960 if (ifr->ifr_mtu > IXL_MAX_FRAME -
961 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
965 ifp->if_mtu = ifr->ifr_mtu;
966 vsi->max_frame_size =
967 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
968 + ETHER_VLAN_ENCAP_LEN;
974 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
976 if (ifp->if_flags & IFF_UP) {
977 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changes need action while running. */
978 if ((ifp->if_flags ^ pf->if_flags) &
979 (IFF_PROMISC | IFF_ALLMULTI)) {
980 ixl_set_promisc(vsi);
985 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
987 pf->if_flags = ifp->if_flags;
/* Multicast list changes: quiesce interrupts around the filter update. */
991 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
992 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
994 ixl_disable_intr(vsi);
996 ixl_enable_intr(vsi);
1001 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1002 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1004 ixl_disable_intr(vsi);
1006 ixl_enable_intr(vsi);
1012 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1013 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* SIOCSIFCAP: mask holds the capability bits being toggled. */
1017 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1018 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
/* TXCSUM/TSO interlock handled by the helper above. */
1020 ixl_cap_txcsum_tso(vsi, ifp, mask);
1022 if (mask & IFCAP_RXCSUM)
1023 ifp->if_capenable ^= IFCAP_RXCSUM;
1024 if (mask & IFCAP_RXCSUM_IPV6)
1025 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1026 if (mask & IFCAP_LRO)
1027 ifp->if_capenable ^= IFCAP_LRO;
1028 if (mask & IFCAP_VLAN_HWTAGGING)
1029 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1030 if (mask & IFCAP_VLAN_HWFILTER)
1031 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1032 if (mask & IFCAP_VLAN_HWTSO)
1033 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Reinitialize so the new capabilities take effect in hardware. */
1034 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1036 ixl_init_locked(pf);
1039 VLAN_CAPABILITIES(ifp);
/* Everything else is delegated to the generic Ethernet handler. */
1045 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1046 error = ether_ioctl(ifp, command, data);
1054 /*********************************************************************
1057 * This routine is used in two ways. It is used by the stack as
1058 * init entry point in network interface structure. It is also used
1059 * by the driver as a hw/sw initialization routine to get to a
1062 * return 0 on success, positive on failure
1063 **********************************************************************/
/* Bring the interface up: program MAC/filters/rings/interrupts and mark
 * the ifnet running.  Caller must hold the PF mutex (asserted below). */
1066 ixl_init_locked(struct ixl_pf *pf)
1068 struct i40e_hw *hw = &pf->hw;
1069 struct ixl_vsi *vsi = &pf->vsi;
1070 struct ifnet *ifp = vsi->ifp;
1071 device_t dev = pf->dev;
1072 struct i40e_filter_control_settings filter;
1073 u8 tmpaddr[ETHER_ADDR_LEN];
1076 mtx_assert(&pf->pf_mtx, MA_OWNED);
1077 INIT_DEBUGOUT("ixl_init: begin");
1080 /* Get the latest mac address... User might use a LAA */
1081 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1082 I40E_ETH_LENGTH_OF_ADDRESS);
/* If the stack's address differs from the cached one and validates,
 * push the locally-administered address to firmware. */
1083 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1084 i40e_validate_mac_addr(tmpaddr)) {
1085 bcopy(tmpaddr, hw->mac.addr,
1086 I40E_ETH_LENGTH_OF_ADDRESS);
1087 ret = i40e_aq_mac_address_write(hw,
1088 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1089 hw->mac.addr, NULL);
1091 device_printf(dev, "LLA address"
1092 "change failed!!\n");
1097 /* Set the various hardware offload abilities */
/* Rebuild if_hwassist from the currently enabled capabilities. */
1098 ifp->if_hwassist = 0;
1099 if (ifp->if_capenable & IFCAP_TSO)
1100 ifp->if_hwassist |= CSUM_TSO;
1101 if (ifp->if_capenable & IFCAP_TXCSUM)
1102 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1103 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1104 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1106 /* Set up the device filtering */
1107 bzero(&filter, sizeof(filter));
1108 filter.enable_ethtype = TRUE;
1109 filter.enable_macvlan = TRUE;
1111 filter.enable_fdir = TRUE;
1113 if (i40e_set_filter_control(hw, &filter))
1114 device_printf(dev, "set_filter_control() failed\n");
1117 ixl_config_rss(vsi);
1123 ** Prepare the rings, hmc contexts, etc...
1125 if (ixl_initialize_vsi(vsi)) {
1126 device_printf(dev, "initialize vsi failed!!\n");
1130 /* Add protocol filters to list */
1131 ixl_init_filters(vsi);
1133 /* Setup vlan's if needed */
1134 ixl_setup_vlan_filters(vsi);
1136 /* Start the local timer */
1137 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1139 /* Set up MSI/X routing and the ITR settings */
1140 if (ixl_enable_msix) {
1141 ixl_configure_msix(pf);
1142 ixl_configure_itr(pf);
1144 ixl_configure_legacy(pf);
1146 ixl_enable_rings(vsi);
1148 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1150 /* Set MTU in hardware*/
1151 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1154 device_printf(vsi->dev,
1155 "aq_set_mac_config in init error, code %d\n",
1158 /* And now turn on interrupts */
1159 ixl_enable_intr(vsi);
1161 /* Now inform the stack we're ready */
1162 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1163 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* ixl_init (fragment): fetch the PF softc and run the locked init path.
 * NOTE(review): the function header and locking lines are missing from
 * this extract -- presumably pf_mtx is taken around the call; confirm. */
1171 struct ixl_pf *pf = arg;
1174 ixl_init_locked(pf);
1181 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixl_handle_que - deferred (taskqueue) service for one queue pair:
 * cleans RX, restarts TX if the buf ring has work, then re-enables the
 * queue interrupt.  NOTE(review): lossy extract -- the TX lock/txeof and
 * re-enqueue condition lines are missing from this view.
 */
1185 ixl_handle_que(void *context, int pending)
1187 struct ixl_queue *que = context;
1188 struct ixl_vsi *vsi = que->vsi;
1189 struct i40e_hw *hw = vsi->hw;
1190 struct tx_ring *txr = &que->txr;
1191 struct ifnet *ifp = vsi->ifp;
/* Only do work while the interface is up */
1194 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1195 more = ixl_rxeof(que, IXL_RX_LIMIT);
1198 if (!drbr_empty(ifp, txr->br))
1199 ixl_mq_start_locked(ifp, txr);
/* More work pending: reschedule ourselves rather than spin here */
1202 taskqueue_enqueue(que->tq, &que->task);
1207 /* Reenable this interrupt - hmmm */
1208 ixl_enable_queue(hw, que->me);
1213 /*********************************************************************
1215 * Legacy Interrupt Service routine
1217 **********************************************************************/
/*
 * ixl_intr - legacy/MSI (single vector) interrupt service routine.
 * Handles both the admin queue cause and queue-pair 0 RX/TX work,
 * then re-arms the causes.  NOTE(review): the function header and some
 * lines (return, txeof locking) are missing from this extract.
 */
1221 struct ixl_pf *pf = arg;
1222 struct i40e_hw *hw = &pf->hw;
1223 struct ixl_vsi *vsi = &pf->vsi;
1224 struct ixl_queue *que = vsi->queues;
1225 struct ifnet *ifp = vsi->ifp;
1226 struct tx_ring *txr = &que->txr;
1227 u32 reg, icr0, mask;
1228 bool more_tx, more_rx;
1232 /* Protect against spurious interrupts */
1233 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1236 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* Acknowledge: set CLEARPBA to clear the pending-bit array */
1238 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1239 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1240 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1242 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* Admin queue work is deferred to the adminq task */
1244 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1245 taskqueue_enqueue(pf->tq, &pf->adminq);
1249 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1252 more_tx = ixl_txeof(que);
1253 if (!drbr_empty(vsi->ifp, txr->br))
1257 /* re-enable other interrupt causes */
1258 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1260 /* And now the queues */
1261 reg = rd32(hw, I40E_QINT_RQCTL(0));
1262 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1263 wr32(hw, I40E_QINT_RQCTL(0), reg);
1265 reg = rd32(hw, I40E_QINT_TQCTL(0));
1266 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
/* NOTE(review): a PFINT_ICR0 mask is cleared in a QINT_TQCTL value
 * here -- looks suspicious; verify against the XL710 datasheet. */
1267 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1268 wr32(hw, I40E_QINT_TQCTL(0), reg);
1270 ixl_enable_legacy(hw);
1276 /*********************************************************************
1278 * MSIX VSI Interrupt Service routine
1280 **********************************************************************/
/*
 * ixl_msix_que - MSI-X per-queue-pair interrupt service routine.
 * Cleans RX/TX, updates dynamic ITR, defers leftover work to the queue
 * task, and re-enables the vector.  NOTE(review): lossy extract -- the
 * TX lock and deferred-start lines are missing from this view.
 */
1282 ixl_msix_que(void *arg)
1284 struct ixl_queue *que = arg;
1285 struct ixl_vsi *vsi = que->vsi;
1286 struct i40e_hw *hw = vsi->hw;
1287 struct tx_ring *txr = &que->txr;
1288 bool more_tx, more_rx;
1290 /* Protect against spurious interrupts */
1291 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1296 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1299 more_tx = ixl_txeof(que);
1301 ** Make certain that if the stack
1302 ** has anything queued the task gets
1303 ** scheduled to handle it.
1305 if (!drbr_empty(vsi->ifp, txr->br))
/* Re-tune interrupt throttling based on observed traffic */
1309 ixl_set_queue_rx_itr(que);
1310 ixl_set_queue_tx_itr(que);
1312 if (more_tx || more_rx)
1313 taskqueue_enqueue(que->tq, &que->task);
1315 ixl_enable_queue(hw, que->me);
1321 /*********************************************************************
1323 * MSIX Admin Queue Interrupt Service routine
1325 **********************************************************************/
/*
 * ixl_msix_adminq - MSI-X admin queue (vector 0) interrupt handler.
 * Masks the causes that fired, handles MDD inline, and defers the rest
 * to the adminq task.  NOTE(review): the line that writes the updated
 * ICR0_ENA mask back is missing from this extract -- presumably it is
 * restored by the adminq task; confirm.
 */
1327 ixl_msix_adminq(void *arg)
1329 struct ixl_pf *pf = arg;
1330 struct i40e_hw *hw = &pf->hw;
1335 reg = rd32(hw, I40E_PFINT_ICR0);
1336 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1338 /* Check on the cause */
1339 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1340 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious driver detection is handled immediately */
1342 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1343 ixl_handle_mdd_event(pf);
1344 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1347 if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1348 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
/* Acknowledge the interrupt (clear the pending-bit array) */
1350 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1351 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1352 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1354 taskqueue_enqueue(pf->tq, &pf->adminq);
1358 /*********************************************************************
1360 * Media Ioctl callback
1362 * This routine is called whenever the user queries the status of
1363 * the interface using ifconfig.
1365 **********************************************************************/
/*
 * ixl_media_status - ifmedia status callback (ifconfig queries).
 * Refreshes link state, then maps the PHY type reported by firmware to
 * the corresponding IFM_* active media word, plus flow-control flags.
 * NOTE(review): lossy extract -- locking and break statements between
 * cases are missing from this view.
 */
1367 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1369 struct ixl_vsi *vsi = ifp->if_softc;
1370 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1371 struct i40e_hw *hw = &pf->hw;
1373 INIT_DEBUGOUT("ixl_media_status: begin");
1376 ixl_update_link_status(pf);
1378 ifmr->ifm_status = IFM_AVALID;
1379 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive media and stop */
1381 if (!vsi->link_up) {
1386 ifmr->ifm_status |= IFM_ACTIVE;
1387 /* Hardware is always full-duplex */
1388 ifmr->ifm_active |= IFM_FDX;
/* Translate firmware PHY type to an ifmedia subtype */
1390 switch (hw->phy.link_info.phy_type) {
1392 case I40E_PHY_TYPE_100BASE_TX:
1393 ifmr->ifm_active |= IFM_100_TX;
1396 case I40E_PHY_TYPE_1000BASE_T:
1397 ifmr->ifm_active |= IFM_1000_T;
1399 case I40E_PHY_TYPE_1000BASE_SX:
1400 ifmr->ifm_active |= IFM_1000_SX;
1402 case I40E_PHY_TYPE_1000BASE_LX:
1403 ifmr->ifm_active |= IFM_1000_LX;
1406 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1407 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1408 ifmr->ifm_active |= IFM_10G_TWINAX;
1410 case I40E_PHY_TYPE_10GBASE_SR:
1411 ifmr->ifm_active |= IFM_10G_SR;
1413 case I40E_PHY_TYPE_10GBASE_LR:
1414 ifmr->ifm_active |= IFM_10G_LR;
1416 case I40E_PHY_TYPE_10GBASE_T:
1417 ifmr->ifm_active |= IFM_10G_T;
1420 case I40E_PHY_TYPE_40GBASE_CR4:
1421 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1422 ifmr->ifm_active |= IFM_40G_CR4;
1424 case I40E_PHY_TYPE_40GBASE_SR4:
1425 ifmr->ifm_active |= IFM_40G_SR4;
1427 case I40E_PHY_TYPE_40GBASE_LR4:
1428 ifmr->ifm_active |= IFM_40G_LR4;
/* Unrecognized PHY type falls through to IFM_UNKNOWN */
1431 ifmr->ifm_active |= IFM_UNKNOWN;
1434 /* Report flow control status as well */
1435 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1436 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1437 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1438 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1445 /*********************************************************************
1447 * Media Ioctl callback
1449 * This routine is called when the user changes speed/duplex using
1450 * media/mediopt option with ifconfig.
1452 **********************************************************************/
/*
 * ixl_media_change - ifmedia change callback (ifconfig media/mediaopt).
 * Validates the request is Ethernet, then rejects it: manual media
 * selection is not supported by this driver version.
 */
1454 ixl_media_change(struct ifnet * ifp)
1456 struct ixl_vsi *vsi = ifp->if_softc;
1457 struct ifmedia *ifm = &vsi->media;
1459 INIT_DEBUGOUT("ixl_media_change: begin");
1461 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1464 if_printf(ifp, "Media change is currently not supported.\n");
1472 ** ATR: Application Targetted Receive - creates a filter
1473 ** based on TX flow info that will keep the receive
1474 ** portion of the flow on the same queue. Based on the
1475 ** implementation this is only available for TCP connections
/*
 * ixl_atr - Application Targeted Receive: program a flow-director
 * descriptor from TX TCP flow info so the RX side of the flow lands on
 * the same queue.  Samples SYN/FIN packets and every atr_rate-th packet;
 * FIN triggers filter removal, otherwise add/update.
 * NOTE(review): lossy extract -- sample-counter updates and early
 * returns are missing from this view.
 */
1478 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1480 struct ixl_vsi *vsi = que->vsi;
1481 struct tx_ring *txr = &que->txr;
1482 struct i40e_filter_program_desc *FDIR;
1486 /* check if ATR is enabled and sample rate */
1487 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1490 ** We sample all TCP SYN/FIN packets,
1491 ** or at the selected sample rate
1494 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1495 (txr->atr_count < txr->atr_rate))
1499 /* Get a descriptor to use */
1500 idx = txr->next_avail;
/* Filter-program descriptors live in the normal TX descriptor ring */
1501 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1502 if (++idx == que->num_desc)
1505 txr->next_avail = idx;
1507 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1508 I40E_TXD_FLTR_QW0_QINDEX_MASK;
/* Packet classifier type depends on IPv4 vs IPv6 ethertype */
1510 ptype |= (etype == ETHERTYPE_IP) ?
1511 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1512 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1513 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1514 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1516 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1518 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1521 ** We use the TCP TH_FIN as a trigger to remove
1522 ** the filter, otherwise its an update.
1524 dtype |= (th->th_flags & TH_FIN) ?
1525 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1526 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1527 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1528 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1530 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1531 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1533 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1534 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
/* Descriptors are little-endian on the wire */
1536 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1537 FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * ixl_set_promisc - push unicast/multicast promiscuous state to the
 * hardware via the admin queue, based on IFF_PROMISC/IFF_ALLMULTI and
 * the multicast address count.
 * NOTE(review): lossy extract; also, 'err' from the unicast AQ call is
 * overwritten by the multicast call below, so the first error code is
 * discarded -- worth confirming upstream whether that is intentional.
 */
1544 ixl_set_promisc(struct ixl_vsi *vsi)
1546 struct ifnet *ifp = vsi->ifp;
1547 struct i40e_hw *hw = vsi->hw;
1549 bool uni = FALSE, multi = FALSE;
1551 if (ifp->if_flags & IFF_ALLMULTI)
1553 else { /* Need to count the multicast addresses */
1554 struct ifmultiaddr *ifma;
1555 if_maddr_rlock(ifp);
1556 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1557 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Stop counting once the hardware filter limit is reached */
1559 if (mcnt == MAX_MULTICAST_ADDR)
1563 if_maddr_runlock(ifp);
1566 if (mcnt >= MAX_MULTICAST_ADDR)
1568 if (ifp->if_flags & IFF_PROMISC)
1571 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1572 vsi->seid, uni, NULL);
1573 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1574 vsi->seid, multi, NULL);
1578 /*********************************************************************
1581 * Routines for multicast and vlan filter management.
1583 *********************************************************************/
/*
 * ixl_add_multi - sync the interface's multicast address list into the
 * VSI's MAC filter list.  If the list exceeds MAX_MULTICAST_ADDR, fall
 * back to hardware multicast-promiscuous mode instead of per-address
 * filters.  NOTE(review): lossy extract -- loop bodies/counters are
 * partially missing from this view.
 */
1585 ixl_add_multi(struct ixl_vsi *vsi)
1587 struct ifmultiaddr *ifma;
1588 struct ifnet *ifp = vsi->ifp;
1589 struct i40e_hw *hw = vsi->hw;
1590 int mcnt = 0, flags;
1592 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1594 if_maddr_rlock(ifp);
1596 ** First just get a count, to decide if we
1597 ** we simply use multicast promiscuous.
1599 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1600 if (ifma->ifma_addr->sa_family != AF_LINK)
1604 if_maddr_runlock(ifp);
/* Too many groups: drop filters and go multicast-promiscuous */
1606 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1607 /* delete existing MC filters */
1608 ixl_del_hw_filters(vsi, mcnt);
1609 i40e_aq_set_vsi_multicast_promiscuous(hw,
1610 vsi->seid, TRUE, NULL);
/* Second pass: add each link-layer multicast address to the list */
1615 if_maddr_rlock(ifp);
1616 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1617 if (ifma->ifma_addr->sa_family != AF_LINK)
1619 ixl_add_mc_filter(vsi,
1620 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1623 if_maddr_runlock(ifp);
1625 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1626 ixl_add_hw_filters(vsi, flags, mcnt);
1629 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * ixl_del_multi - mark for deletion any multicast filter in the VSI's
 * filter list that no longer appears in the interface's multicast
 * address list, then push the removals to hardware.
 * NOTE(review): lossy extract -- 'match'/'mcnt' bookkeeping lines are
 * partially missing from this view.
 */
1634 ixl_del_multi(struct ixl_vsi *vsi)
1636 struct ifnet *ifp = vsi->ifp;
1637 struct ifmultiaddr *ifma;
1638 struct ixl_mac_filter *f;
1642 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1644 /* Search for removed multicast addresses */
1645 if_maddr_rlock(ifp);
1646 SLIST_FOREACH(f, &vsi->ftl, next) {
/* Only consider active multicast entries */
1647 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1649 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1650 if (ifma->ifma_addr->sa_family != AF_LINK)
1652 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1653 if (cmp_etheraddr(f->macaddr, mc_addr)) {
/* Not found in the current OS list: schedule for deletion */
1658 if (match == FALSE) {
1659 f->flags |= IXL_FILTER_DEL;
1664 if_maddr_runlock(ifp);
1667 ixl_del_hw_filters(vsi, mcnt);
1671 /*********************************************************************
1674 * This routine checks for link status,updates statistics,
1675 * and runs the watchdog check.
1677 **********************************************************************/
/*
 * ixl_local_timer - per-second callout: kicks the adminq task, updates
 * statistics, fires a software interrupt at busy queues, and performs
 * the TX hang ("watchdog") check.  If every queue is hung, reinit the
 * interface.  Caller context holds pf_mtx (asserted below).
 * NOTE(review): lossy extract -- 'hung' accounting and goto lines are
 * missing from this view.
 */
1680 ixl_local_timer(void *arg)
1682 struct ixl_pf *pf = arg;
1683 struct i40e_hw *hw = &pf->hw;
1684 struct ixl_vsi *vsi = &pf->vsi;
1685 struct ixl_queue *que = vsi->queues;
1686 device_t dev = pf->dev;
1690 mtx_assert(&pf->pf_mtx, MA_OWNED);
1692 /* Fire off the adminq task */
1693 taskqueue_enqueue(pf->tq, &pf->adminq);
1696 ixl_update_stats_counters(pf);
1699 ** Check status of the queues
1701 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1702 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1704 for (int i = 0; i < vsi->num_queues; i++,que++) {
1705 /* Any queues with outstanding work get a sw irq */
1707 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1709 ** Each time txeof runs without cleaning, but there
1710 ** are uncleaned descriptors it increments busy. If
1711 ** we get to 5 we declare it hung.
1713 if (que->busy == IXL_QUEUE_HUNG) {
1715 /* Mark the queue as inactive */
1716 vsi->active_queues &= ~((u64)1 << que->me);
1719 /* Check if we've come back from hung */
1720 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1721 vsi->active_queues |= ((u64)1 << que->me);
1723 if (que->busy >= IXL_MAX_TX_BUSY) {
1724 device_printf(dev,"Warning queue %d "
1725 "appears to be hung!\n", i);
1726 que->busy = IXL_QUEUE_HUNG;
1730 /* Only reinit if all queues show hung */
1731 if (hung == vsi->num_queues)
/* Re-arm the one-second timer */
1734 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Hang path: full reinitialization of the interface */
1738 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1739 ixl_init_locked(pf);
1743 ** Note: this routine updates the OS on the link state
1744 ** the real check of the hardware only happens with
1745 ** a link interrupt.
/*
 * ixl_update_link_status - propagate the driver's cached link state to
 * the OS (if_link_state_change) and log transitions.  The real hardware
 * link check happens from the link interrupt, not here.
 * NOTE(review): lossy extract -- the link_up branch condition lines are
 * missing from this view.
 */
1748 ixl_update_link_status(struct ixl_pf *pf)
1750 struct ixl_vsi *vsi = &pf->vsi;
1751 struct i40e_hw *hw = &pf->hw;
1752 struct ifnet *ifp = vsi->ifp;
1753 device_t dev = pf->dev;
1754 enum i40e_fc_mode fc;
/* Link-up transition: refresh AQ link info and notify the stack */
1758 if (vsi->link_active == FALSE) {
1759 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1761 fc = hw->fc.current_mode;
1762 device_printf(dev,"Link is up %d Gbps %s,"
1763 " Flow Control: %s\n",
1764 ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1765 "Full Duplex", ixl_fc_string[fc]);
1767 vsi->link_active = TRUE;
1768 if_link_state_change(ifp, LINK_STATE_UP);
1770 } else { /* Link down */
1771 if (vsi->link_active == TRUE) {
1773 device_printf(dev,"Link is Down\n");
1774 if_link_state_change(ifp, LINK_STATE_DOWN);
1775 vsi->link_active = FALSE;
1782 /*********************************************************************
1784 * This routine disables all traffic on the adapter by issuing a
1785 * global reset on the MAC and deallocates TX/RX buffers.
1787 **********************************************************************/
/*
 * ixl_stop - quiesce the interface: disable interrupts and rings, clear
 * the RUNNING/OACTIVE flags, and stop the watchdog callout.
 * Caller must hold pf->pf_mtx (asserted below).
 */
1790 ixl_stop(struct ixl_pf *pf)
1792 struct ixl_vsi *vsi = &pf->vsi;
1793 struct ifnet *ifp = vsi->ifp;
1795 mtx_assert(&pf->pf_mtx, MA_OWNED);
1797 INIT_DEBUGOUT("ixl_stop: begin\n");
1798 ixl_disable_intr(vsi);
1799 ixl_disable_rings(vsi);
1801 /* Tell the stack that the interface is no longer active */
1802 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1804 /* Stop the local timer */
1805 callout_stop(&pf->timer);
1811 /*********************************************************************
1813 * Setup MSIX Interrupt resources and handlers for the VSI
1815 **********************************************************************/
/*
 * ixl_assign_vsi_legacy - allocate and wire up the single legacy/MSI
 * interrupt: IRQ resource, handler, and the queue/adminq taskqueues.
 * NOTE(review): lossy extract -- rid initialization and error returns
 * are missing from this view.
 */
1817 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1819 device_t dev = pf->dev;
1820 struct ixl_vsi *vsi = &pf->vsi;
1821 struct ixl_queue *que = vsi->queues;
1826 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1827 &rid, RF_SHAREABLE | RF_ACTIVE);
1828 if (pf->res == NULL) {
1829 device_printf(dev,"Unable to allocate"
1830 " bus resource: vsi legacy/msi interrupt\n");
1834 /* Set the handler function */
1835 error = bus_setup_intr(dev, pf->res,
1836 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1837 ixl_intr, pf, &pf->tag);
1840 device_printf(dev, "Failed to register legacy/msi handler");
1843 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
/* Deferred work contexts: per-queue TX start and queue service task */
1844 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1845 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1846 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1847 taskqueue_thread_enqueue, &que->tq);
1848 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1849 device_get_nameunit(dev));
/* Separate taskqueue for admin queue processing */
1850 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1851 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1852 taskqueue_thread_enqueue, &pf->tq);
1853 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1854 device_get_nameunit(dev));
1860 /*********************************************************************
1862 * Setup MSIX Interrupt resources and handlers for the VSI
1864 **********************************************************************/
/*
 * ixl_assign_vsi_msix - allocate and wire up MSI-X interrupts: vector 0
 * for the admin queue, then one vector per queue pair, each with its
 * own IRQ resource, handler, CPU binding, and taskqueue.
 * NOTE(review): lossy extract -- rid computation per loop iteration and
 * error returns are missing from this view.
 */
1866 ixl_assign_vsi_msix(struct ixl_pf *pf)
1868 device_t dev = pf->dev;
1869 struct ixl_vsi *vsi = &pf->vsi;
1870 struct ixl_queue *que = vsi->queues;
1871 struct tx_ring *txr;
1872 int error, rid, vector = 0;
1874 /* Admin Que is vector 0*/
1876 pf->res = bus_alloc_resource_any(dev,
1877 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1879 device_printf(dev,"Unable to allocate"
1880 " bus resource: Adminq interrupt [%d]\n", rid);
1883 /* Set the adminq vector and handler */
1884 error = bus_setup_intr(dev, pf->res,
1885 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1886 ixl_msix_adminq, pf, &pf->tag);
1889 device_printf(dev, "Failed to register Admin que handler");
1892 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1893 pf->admvec = vector;
1894 /* Tasklet for Admin Queue */
1895 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1896 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1897 taskqueue_thread_enqueue, &pf->tq);
1898 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1899 device_get_nameunit(pf->dev));
1902 /* Now set up the stations */
1903 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1906 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1907 RF_SHAREABLE | RF_ACTIVE);
1908 if (que->res == NULL) {
1909 device_printf(dev,"Unable to allocate"
1910 " bus resource: que interrupt [%d]\n", vector);
1913 /* Set the handler function */
1914 error = bus_setup_intr(dev, que->res,
1915 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1916 ixl_msix_que, que, &que->tag);
1919 device_printf(dev, "Failed to register que handler");
1922 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1923 /* Bind the vector to a CPU */
1924 bus_bind_intr(dev, que->res, i);
/* Per-queue deferred work: TX start task and queue service task */
1926 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1927 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1928 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1929 taskqueue_thread_enqueue, &que->tq);
1930 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1931 device_get_nameunit(pf->dev));
1939 * Allocate MSI/X vectors
/*
 * ixl_init_msix - decide the interrupt mode and allocate vectors.
 * Returns the number of MSI-X vectors obtained (queues + 1 for the
 * adminq), or falls back to MSI / legacy.  Also works around
 * virtualized environments that leave BUSMASTER/MSIX-enable unset.
 * NOTE(review): lossy extract -- several returns and the 'want'
 * computation line are missing from this view.
 */
1942 ixl_init_msix(struct ixl_pf *pf)
1944 device_t dev = pf->dev;
1945 int rid, want, vectors, queues, available;
1947 /* Override by tuneable */
1948 if (ixl_enable_msix == 0)
1952 ** When used in a virtualized environment
1953 ** PCI BUSMASTER capability may not be set
1954 ** so explicity set it here and rewrite
1955 ** the ENABLE in the MSIX control register
1956 ** at this point to cause the host to
1957 ** successfully initialize us.
1962 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1963 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1964 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1965 pci_find_cap(dev, PCIY_MSIX, &rid);
1966 rid += PCIR_MSIX_CTRL;
1967 msix_ctrl = pci_read_config(dev, rid, 2);
1968 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1969 pci_write_config(dev, rid, msix_ctrl, 2);
1972 /* First try MSI/X */
1973 rid = PCIR_BAR(IXL_BAR);
1974 pf->msix_mem = bus_alloc_resource_any(dev,
1975 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1976 if (!pf->msix_mem) {
1977 /* May not be enabled */
1978 device_printf(pf->dev,
1979 "Unable to map MSIX table \n");
1983 available = pci_msix_count(dev);
1984 if (available == 0) { /* system has msix disabled */
1985 bus_release_resource(dev, SYS_RES_MEMORY,
1987 pf->msix_mem = NULL;
1991 /* Figure out a reasonable auto config value */
1992 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1994 /* Override with hardcoded value if sane */
1995 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1996 queues = ixl_max_queues;
1999 ** Want one vector (RX/TX pair) per queue
2000 ** plus an additional for the admin queue.
2003 if (want <= available) /* Have enough */
2006 device_printf(pf->dev,
2007 "MSIX Configuration Problem, "
2008 "%d vectors available but %d wanted!\n",
2010 return (0); /* Will go to Legacy setup */
2013 if (pci_alloc_msix(dev, &vectors) == 0) {
2014 device_printf(pf->dev,
2015 "Using MSIX interrupts with %d vectors\n", vectors);
2017 pf->vsi.num_queues = queues;
/* MSI-X failed/disabled: fall back to a single MSI or legacy IRQ */
2021 vectors = pci_msi_count(dev);
2022 pf->vsi.num_queues = 1;
2025 ixl_enable_msix = 0;
2026 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2027 device_printf(pf->dev,"Using an MSI interrupt\n");
2030 device_printf(pf->dev,"Using a Legacy interrupt\n");
2037 * Plumb MSI/X vectors
/*
 * ixl_configure_msix - program hardware interrupt routing for MSI-X:
 * vector 0 carries the "other" causes (adminq, errors), then each queue
 * pair is linked into its own vector's interrupt linked list with RX
 * and TX causes chained together.
 * NOTE(review): lossy extract -- the 'vector' initialization line is
 * missing from this view.
 */
2040 ixl_configure_msix(struct ixl_pf *pf)
2042 struct i40e_hw *hw = &pf->hw;
2043 struct ixl_vsi *vsi = &pf->vsi;
2047 /* First set up the adminq - vector 0 */
2048 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2049 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable only the "other" causes serviced by ixl_msix_adminq */
2051 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2052 I40E_PFINT_ICR0_ENA_GRST_MASK |
2053 I40E_PFINT_ICR0_HMC_ERR_MASK |
2054 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2055 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2056 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2057 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2058 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* 0x7FF terminates vector 0's queue linked list (no queues on it) */
2060 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2061 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2063 wr32(hw, I40E_PFINT_DYN_CTL0,
2064 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2065 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2067 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2069 /* Next configure the queues */
2070 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2071 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2072 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: chain to the TX queue of the same pair */
2074 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2075 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2076 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2077 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2078 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2079 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: chain to the next pair's RX, or terminate on the last */
2081 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2082 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2083 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2084 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2085 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2086 if (i == (vsi->num_queues - 1))
2087 reg |= (IXL_QUEUE_EOL
2088 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2089 wr32(hw, I40E_QINT_TQCTL(i), reg);
2094 * Configure for MSI single vector operation
/*
 * ixl_configure_legacy - program hardware interrupt routing for the
 * single-vector (MSI/legacy) case: all "other" causes plus queue pair 0
 * share vector 0, and the queue pair is enabled directly.
 */
2097 ixl_configure_legacy(struct ixl_pf *pf)
2099 struct i40e_hw *hw = &pf->hw;
/* No interrupt throttling on either ITR index */
2103 wr32(hw, I40E_PFINT_ITR0(0), 0);
2104 wr32(hw, I40E_PFINT_ITR0(1), 0);
2107 /* Setup "other" causes */
2108 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2109 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2110 | I40E_PFINT_ICR0_ENA_GRST_MASK
2111 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2112 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2113 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2114 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2115 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2116 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2117 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2119 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2121 /* SW_ITR_IDX = 0, but don't change INTENA */
2122 wr32(hw, I40E_PFINT_DYN_CTL0,
2123 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2124 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2125 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2126 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2128 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2129 wr32(hw, I40E_PFINT_LNKLST0, 0);
2131 /* Associate the queue pair to the vector and enable the q int */
/* NOTE(review): the NEXTQ_TYPE field here uses the TQCTL shift while
 * building an RQCTL value -- verify against the XL710 datasheet. */
2132 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2133 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2134 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2135 wr32(hw, I40E_QINT_RQCTL(0), reg);
2137 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2138 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2139 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2140 wr32(hw, I40E_QINT_TQCTL(0), reg);
2142 /* Next enable the queue pair */
2143 reg = rd32(hw, I40E_QTX_ENA(0));
2144 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2145 wr32(hw, I40E_QTX_ENA(0), reg);
2147 reg = rd32(hw, I40E_QRX_ENA(0));
2148 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2149 wr32(hw, I40E_QRX_ENA(0), reg);
2154 * Set the Initial ITR state
/*
 * ixl_configure_itr - set the initial interrupt throttle rate for every
 * queue's RX and TX ITR registers from the tunables, optionally marking
 * them dynamic so the ISR can adapt them at runtime.
 */
2157 ixl_configure_itr(struct ixl_pf *pf)
2159 struct i40e_hw *hw = &pf->hw;
2160 struct ixl_vsi *vsi = &pf->vsi;
2161 struct ixl_queue *que = vsi->queues;
2163 vsi->rx_itr_setting = ixl_rx_itr;
2164 if (ixl_dynamic_rx_itr)
2165 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2166 vsi->tx_itr_setting = ixl_tx_itr;
2167 if (ixl_dynamic_tx_itr)
2168 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
/* Program each queue pair and cache the setting on its rings */
2170 for (int i = 0; i < vsi->num_queues; i++, que++) {
2171 struct tx_ring *txr = &que->txr;
2172 struct rx_ring *rxr = &que->rxr;
2174 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2175 vsi->rx_itr_setting);
2176 rxr->itr = vsi->rx_itr_setting;
2177 rxr->latency = IXL_AVE_LATENCY;
2178 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2179 vsi->tx_itr_setting);
2180 txr->itr = vsi->tx_itr_setting;
2181 txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_allocate_pci_resources - map the device's register BAR, fill in
 * the osdep bus-space handles used by rd32/wr32, and pick the
 * interrupt mode (MSI-X / MSI) via ixl_init_msix().
 * NOTE(review): lossy extract -- the rid setup and return lines are
 * missing from this view.
 */
2187 ixl_allocate_pci_resources(struct ixl_pf *pf)
2190 device_t dev = pf->dev;
2193 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2196 if (!(pf->pci_mem)) {
2197 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save bus-space tag/handle for register access macros */
2201 pf->osdep.mem_bus_space_tag =
2202 rman_get_bustag(pf->pci_mem);
2203 pf->osdep.mem_bus_space_handle =
2204 rman_get_bushandle(pf->pci_mem);
2205 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2206 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2207 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2209 pf->hw.back = &pf->osdep;
2212 ** Now setup MSI or MSI/X, should
2213 ** return us the number of supported
2214 ** vectors. (Will be 1 for MSI)
2216 pf->msix = ixl_init_msix(pf);
/*
 * ixl_free_pci_resources - teardown counterpart of the allocation path:
 * releases per-queue MSI-X IRQs, the adminq/legacy IRQ, MSI vectors,
 * the MSI-X table BAR, and the register BAR.  Safe to call before
 * stations are set up.
 */
2221 ixl_free_pci_resources(struct ixl_pf * pf)
2223 struct ixl_vsi *vsi = &pf->vsi;
2224 struct ixl_queue *que = vsi->queues;
2225 device_t dev = pf->dev;
2228 memrid = PCIR_BAR(IXL_BAR);
2230 /* We may get here before stations are setup */
2231 if ((!ixl_enable_msix) || (que == NULL))
2235 ** Release all msix VSI resources:
2237 for (int i = 0; i < vsi->num_queues; i++, que++) {
/* rid is the 1-based IRQ resource id for this vector */
2238 rid = que->msix + 1;
2239 if (que->tag != NULL) {
2240 bus_teardown_intr(dev, que->res, que->tag);
2243 if (que->res != NULL)
2244 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2248 /* Clean the AdminQ interrupt last */
2249 if (pf->admvec) /* we are doing MSIX */
2250 rid = pf->admvec + 1;
2252 (pf->msix != 0) ? (rid = 1):(rid = 0);
2254 if (pf->tag != NULL) {
2255 bus_teardown_intr(dev, pf->res, pf->tag);
2258 if (pf->res != NULL)
2259 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2262 pci_release_msi(dev);
2264 if (pf->msix_mem != NULL)
2265 bus_release_resource(dev, SYS_RES_MEMORY,
2266 memrid, pf->msix_mem);
2268 if (pf->pci_mem != NULL)
2269 bus_release_resource(dev, SYS_RES_MEMORY,
2270 PCIR_BAR(0), pf->pci_mem);
/*
 * ixl_add_ifmedia - register an ifmedia entry for every media type the
 * firmware's PHY-capability bitmask (phy_type) reports as supported.
 */
2276 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2278 /* Display supported media types */
2279 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2280 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2282 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2283 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
/* Both 10G copper variants map to the same TWINAX media word */
2285 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2286 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2287 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2288 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2289 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2290 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2291 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2292 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2293 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2295 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2296 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2297 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2298 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2299 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2300 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2301 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2304 /*********************************************************************
2306 * Setup networking device structure and register an interface.
2308 **********************************************************************/
/*
 * ixl_setup_interface - allocate and populate the ifnet, advertise
 * capabilities, register media types from the PHY abilities, and attach
 * to the Ethernet layer.
 * NOTE(review): lossy extract -- NULL-check bodies and return lines are
 * missing from this view.
 */
2310 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2313 struct i40e_hw *hw = vsi->hw;
2314 struct ixl_queue *que = vsi->queues;
2315 struct i40e_aq_get_phy_abilities_resp abilities_resp;
2316 enum i40e_status_code aq_error = 0;
2318 INIT_DEBUGOUT("ixl_setup_interface: begin");
2320 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2322 device_printf(dev, "can not allocate ifnet structure\n");
2325 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2326 ifp->if_mtu = ETHERMTU;
2327 ifp->if_baudrate = 4000000000; // ??
2328 ifp->if_init = ixl_init;
2329 ifp->if_softc = vsi;
2330 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2331 ifp->if_ioctl = ixl_ioctl;
2333 #if __FreeBSD_version >= 1100036
2334 if_setgetcounterfn(ifp, ixl_get_counter);
2337 ifp->if_transmit = ixl_mq_start;
2339 ifp->if_qflush = ixl_qflush;
2341 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
/* Max frame includes L2 header, CRC, and one VLAN tag */
2343 vsi->max_frame_size =
2344 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2345 + ETHER_VLAN_ENCAP_LEN;
2348 * Tell the upper layer(s) we support long frames.
2350 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2352 ifp->if_capabilities |= IFCAP_HWCSUM;
2353 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2354 ifp->if_capabilities |= IFCAP_TSO;
2355 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2356 ifp->if_capabilities |= IFCAP_LRO;
2358 /* VLAN capabilties */
2359 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2362 | IFCAP_VLAN_HWCSUM;
2363 ifp->if_capenable = ifp->if_capabilities;
2366 ** Don't turn this on by default, if vlans are
2367 ** created on another pseudo device (eg. lagg)
2368 ** then vlan events are not passed thru, breaking
2369 ** operation, but with HW FILTER off it works. If
2370 ** using vlans directly on the ixl driver you can
2371 ** enable this and get full hardware tag filtering.
2373 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2376 * Specify the media types supported by this adapter and register
2377 * callbacks to update media and link information
2379 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2382 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2383 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2384 /* Need delay to detect fiber correctly */
2385 i40e_msec_delay(200);
/* Retry once after the settle delay before giving up on the PHY */
2386 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2387 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2388 device_printf(dev, "Unknown PHY type detected!\n");
2390 ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2391 } else if (aq_error) {
2392 device_printf(dev, "Error getting supported media types, err %d,"
2393 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2395 ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2397 /* Use autoselect media by default */
2398 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2399 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2401 ether_ifattach(ifp, hw->mac.addr);
/*
 * ixl_config_link - query firmware for link info and log the current
 * link state.  NOTE(review): lossy extract -- the declaration of
 * 'check' and the return are missing from this view.
 */
2407 ixl_config_link(struct i40e_hw *hw)
2411 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2412 check = i40e_get_link_status(hw);
2414 printf("Link is %s\n", check ? "up":"down");
2419 /*********************************************************************
2421 * Initialize this VSI
* Reads the switch configuration from firmware to learn the VSI SEID,
* fetches the current VSI parameters, then pushes back an updated
* context (queue/TC mapping and VLAN stripping mode).
* NOTE(review): several lines (return type, braces, error-return paths,
* the `next` declaration) are absent from this sampled view.
2423 **********************************************************************/
2425 ixl_setup_vsi(struct ixl_vsi *vsi)
2427 struct i40e_hw *hw = vsi->hw;
2428 device_t dev = vsi->dev;
2429 struct i40e_aqc_get_switch_config_resp *sw_config;
2430 struct i40e_vsi_context ctxt;
2431 u8 aq_buf[I40E_AQ_LARGE_BUF];
2432 int ret = I40E_SUCCESS;
/* The switch-config response is laid over the raw AQ buffer. */
2435 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2436 ret = i40e_aq_get_switch_config(hw, sw_config,
2437 sizeof(aq_buf), &next, NULL);
2439 device_printf(dev,"aq_get_switch_config failed!!\n");
/* Debug dump of the first (and, for this driver, only) switch element */
2443 printf("Switch config: header reported: %d in structure, %d total\n",
2444 sw_config->header.num_reported, sw_config->header.num_total);
2445 printf("type=%d seid=%d uplink=%d downlink=%d\n",
2446 sw_config->element[0].element_type,
2447 sw_config->element[0].seid,
2448 sw_config->element[0].uplink_seid,
2449 sw_config->element[0].downlink_seid);
2451 /* Save off this important value */
2452 vsi->seid = sw_config->element[0].seid;
/* Fetch the current VSI context from firmware before modifying it */
2454 memset(&ctxt, 0, sizeof(ctxt));
2455 ctxt.seid = vsi->seid;
2456 ctxt.pf_num = hw->pf_id;
2457 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2459 device_printf(dev,"get vsi params failed %x!!\n", ret);
2463 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2464 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2465 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2466 ctxt.uplink_seid, ctxt.vsi_number,
2467 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2468 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2469 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
/*
2472 ** Set the queue and traffic class bits
2473 ** - when multiple traffic classes are supported
2474 ** this will need to be more robust.
*/
2476 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2477 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2478 ctxt.info.queue_mapping[0] = 0;
/* 0x0800: single-TC mapping value written to tc_mapping[0] */
2479 ctxt.info.tc_mapping[0] = 0x0800;
2481 /* Set VLAN receive stripping mode */
2482 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2483 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
/* Strip VLAN tags in HW only when the ifnet has HWTAGGING enabled */
2484 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2485 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2487 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2489 /* Keep copy of VSI info in VSI for statistic counters */
2490 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2492 /* Reset VSI statistics */
2493 ixl_vsi_reset_stats(vsi);
2494 vsi->hw_filters_add = 0;
2495 vsi->hw_filters_del = 0;
/* Push the modified context back to firmware */
2497 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2499 device_printf(dev,"update vsi params failed %x!!\n",
2500 hw->aq.asq_last_status);
2505 /*********************************************************************
2507 * Initialize the VSI: this handles contexts, which means things
2508 * like the number of descriptors, buffer size,
2509 * plus we init the rings thru this function.
*
* For every queue pair: build and program the HMC TX/RX queue
* contexts, bind the TX queue to this PF, and (re)initialize the
* software rings.
* NOTE(review): return type, braces, and several error-handling lines
* are missing from this sampled view; code kept byte-identical.
2511 **********************************************************************/
2513 ixl_initialize_vsi(struct ixl_vsi *vsi)
2515 struct ixl_queue *que = vsi->queues;
2516 device_t dev = vsi->dev;
2517 struct i40e_hw *hw = vsi->hw;
2521 for (int i = 0; i < vsi->num_queues; i++, que++) {
2522 struct tx_ring *txr = &que->txr;
2523 struct rx_ring *rxr = &que->rxr;
2524 struct i40e_hmc_obj_txq tctx;
2525 struct i40e_hmc_obj_rxq rctx;
2530 /* Setup the HMC TX Context */
2531 size = que->num_desc * sizeof(struct i40e_tx_desc);
2532 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2533 tctx.new_context = 1;
/* HW wants the DMA base address in 128-byte units */
2534 tctx.base = (txr->dma.pa/128);
2535 tctx.qlen = que->num_desc;
2537 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2538 /* Enable HEAD writeback */
/* Head writeback lands just past the descriptor ring in DMA memory */
2539 tctx.head_wb_ena = 1;
2540 tctx.head_wb_addr = txr->dma.pa +
2541 (que->num_desc * sizeof(struct i40e_tx_desc));
2542 tctx.rdylist_act = 0;
2543 err = i40e_clear_lan_tx_queue_context(hw, i);
2545 device_printf(dev, "Unable to clear TX context\n");
2548 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2550 device_printf(dev, "Unable to set TX context\n");
2553 /* Associate the ring with this PF */
2554 txctl = I40E_QTX_CTL_PF_QUEUE;
2555 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2556 I40E_QTX_CTL_PF_INDX_MASK);
2557 wr32(hw, I40E_QTX_CTL(i), txctl);
2560 /* Do ring (re)init */
2561 ixl_init_tx_ring(que);
2563 /* Next setup the HMC RX Context */
/* Pick mbuf cluster size based on max frame: 2K clusters vs jumbo pages */
2564 if (vsi->max_frame_size <= 2048)
2565 rxr->mbuf_sz = MCLBYTES;
2567 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest frame HW can chain together given the buffer size */
2569 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2571 /* Set up an RX context for the HMC */
2572 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2573 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2574 /* ignore header split for now */
2575 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2576 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2577 vsi->max_frame_size : max_rxmax;
2579 rctx.dsize = 1; /* do 32byte descriptors */
2580 rctx.hsplit_0 = 0; /* no HDR split initially */
2581 rctx.base = (rxr->dma.pa/128);
2582 rctx.qlen = que->num_desc;
/* TPH (TLP processing hints) enabled for descriptors only */
2583 rctx.tphrdesc_ena = 1;
2584 rctx.tphwdesc_ena = 1;
2585 rctx.tphdata_ena = 0;
2586 rctx.tphhead_ena = 0;
2587 rctx.lrxqthresh = 2;
2594 err = i40e_clear_lan_rx_queue_context(hw, i);
2597 "Unable to clear RX context %d\n", i);
2600 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2602 device_printf(dev, "Unable to set RX context %d\n", i);
2605 err = ixl_init_rx_ring(que);
2607 device_printf(dev, "Fail in init_rx_ring %d\n", i);
/* Reset tail then publish all descriptors as available to HW */
2610 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2611 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2617 /*********************************************************************
2619 * Free all VSI structs.
*
* Tears down each queue's TX/RX software state, descriptor DMA memory
* and locks, then frees the queue array and drains the MAC filter list.
* Queues whose ring mutex was never initialized are skipped.
* NOTE(review): braces/`continue` lines and the final free of each
* filter entry are missing from this sampled view.
2621 **********************************************************************/
2623 ixl_free_vsi(struct ixl_vsi *vsi)
2625 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2626 struct ixl_queue *que = vsi->queues;
2627 struct ixl_mac_filter *f;
2629 /* Free station queues */
2630 for (int i = 0; i < vsi->num_queues; i++, que++) {
2631 struct tx_ring *txr = &que->txr;
2632 struct rx_ring *rxr = &que->rxr;
2634 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2637 ixl_free_que_tx(que);
2639 i40e_free_dma_mem(&pf->hw, &txr->dma);
2641 IXL_TX_LOCK_DESTROY(txr);
2643 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2646 ixl_free_que_rx(que);
2648 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2650 IXL_RX_LOCK_DESTROY(rxr);
2653 free(vsi->queues, M_DEVBUF);
2655 /* Free VSI filter list */
2656 while (!SLIST_EMPTY(&vsi->ftl)) {
2657 f = SLIST_FIRST(&vsi->ftl);
2658 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2664 /*********************************************************************
2666 * Allocate memory for the VSI (virtual station interface) and their
2667 * associated queues, rings and the descriptors associated with each,
2668 * called only once at attach.
*
* For each queue: init the TX lock, allocate TX descriptor DMA memory
* (ring + head-writeback u32), TX soft state and a buf_ring; then the
* RX lock, RX descriptor DMA memory and RX soft state. On failure it
* falls through to a cleanup loop freeing the per-queue DMA memory.
* NOTE(review): the vsi assignment, error labels/gotos and several
* braces are missing from this sampled view.
2670 **********************************************************************/
2672 ixl_setup_stations(struct ixl_pf *pf)
2674 device_t dev = pf->dev;
2675 struct ixl_vsi *vsi;
2676 struct ixl_queue *que;
2677 struct tx_ring *txr;
2678 struct rx_ring *rxr;
2680 int error = I40E_SUCCESS;
2683 vsi->back = (void *)pf;
2688 /* Get memory for the station queues */
2690 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2691 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2692 device_printf(dev, "Unable to allocate queue memory\n");
2697 for (int i = 0; i < vsi->num_queues; i++) {
2698 que = &vsi->queues[i];
2699 que->num_desc = ixl_ringsz;
2702 /* mark the queue as active */
2703 vsi->active_queues |= (u64)1 << que->me;
2706 txr->tail = I40E_QTX_TAIL(que->me);
2708 /* Initialize the TX lock */
2709 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2710 device_get_nameunit(dev), que->me);
2711 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2712 /* Create the TX descriptor ring */
/* Extra u32 holds the head-writeback value; round to DBA_ALIGN */
2713 tsize = roundup2((que->num_desc *
2714 sizeof(struct i40e_tx_desc)) +
2715 sizeof(u32), DBA_ALIGN);
2716 if (i40e_allocate_dma_mem(&pf->hw,
2717 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2719 "Unable to allocate TX Descriptor memory\n");
2723 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2724 bzero((void *)txr->base, tsize);
2725 /* Now allocate transmit soft structs for the ring */
2726 if (ixl_allocate_tx_data(que)) {
2728 "Critical Failure setting up TX structures\n");
2732 /* Allocate a buf ring */
2733 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2734 M_WAITOK, &txr->mtx);
2735 if (txr->br == NULL) {
2737 "Critical Failure setting up TX buf ring\n");
/*
2743 * Next the RX queues...
*/
2745 rsize = roundup2(que->num_desc *
2746 sizeof(union i40e_rx_desc), DBA_ALIGN);
2749 rxr->tail = I40E_QRX_TAIL(que->me);
2751 /* Initialize the RX side lock */
2752 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2753 device_get_nameunit(dev), que->me);
2754 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2756 if (i40e_allocate_dma_mem(&pf->hw,
2757 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2759 "Unable to allocate RX Descriptor memory\n");
2763 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2764 bzero((void *)rxr->base, rsize);
2766 /* Allocate receive soft structs for the ring*/
2767 if (ixl_allocate_rx_data(que)) {
2769 "Critical Failure setting up receive structs\n");
/* Error unwind: release per-queue descriptor DMA allocations */
2778 for (int i = 0; i < vsi->num_queues; i++) {
2779 que = &vsi->queues[i];
2783 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2785 i40e_free_dma_mem(&pf->hw, &txr->dma);
/*
2793 ** Provide a update to the queue RX
2794 ** interrupt moderation value.
**
** In dynamic mode, classifies recent RX traffic into a latency band
** (low/average/bulk) from bytes-per-ITR-tick and smooths the new ITR
** into the hardware register. In static mode, re-syncs the ring ITR
** with the VSI setting if they diverge.
** NOTE(review): return type, braces and `break`s are missing here.
*/
2797 ixl_set_queue_rx_itr(struct ixl_queue *que)
2799 struct ixl_vsi *vsi = que->vsi;
2800 struct i40e_hw *hw = vsi->hw;
2801 struct rx_ring *rxr = &que->rxr;
2807 /* Idle, do nothing */
2808 if (rxr->bytes == 0)
2811 if (ixl_dynamic_rx_itr) {
/* Load metric: bytes seen per ITR tick since the last adjustment */
2812 rx_bytes = rxr->bytes/rxr->itr;
2815 /* Adjust latency range */
2816 switch (rxr->latency) {
2817 case IXL_LOW_LATENCY:
2818 if (rx_bytes > 10) {
2819 rx_latency = IXL_AVE_LATENCY;
2820 rx_itr = IXL_ITR_20K;
2823 case IXL_AVE_LATENCY:
2824 if (rx_bytes > 20) {
2825 rx_latency = IXL_BULK_LATENCY;
2826 rx_itr = IXL_ITR_8K;
2827 } else if (rx_bytes <= 10) {
2828 rx_latency = IXL_LOW_LATENCY;
2829 rx_itr = IXL_ITR_100K;
2832 case IXL_BULK_LATENCY:
2833 if (rx_bytes <= 20) {
2834 rx_latency = IXL_AVE_LATENCY;
2835 rx_itr = IXL_ITR_20K;
2840 rxr->latency = rx_latency;
2842 if (rx_itr != rxr->itr) {
2843 /* do an exponential smoothing */
2844 rx_itr = (10 * rx_itr * rxr->itr) /
2845 ((9 * rx_itr) + rxr->itr);
2846 rxr->itr = rx_itr & IXL_MAX_ITR;
2847 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2848 que->me), rxr->itr);
2850 } else { /* We may have have toggled to non-dynamic */
2851 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2852 vsi->rx_itr_setting = ixl_rx_itr;
2853 /* Update the hardware if needed */
2854 if (rxr->itr != vsi->rx_itr_setting) {
2855 rxr->itr = vsi->rx_itr_setting;
2856 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2857 que->me), rxr->itr);
/*
2867 ** Provide a update to the queue TX
2868 ** interrupt moderation value.
**
** TX twin of ixl_set_queue_rx_itr(): same latency-band classification
** and exponential smoothing, applied to the TX ring's ITR register.
** NOTE(review): return type, braces and `break`s are missing here.
*/
2871 ixl_set_queue_tx_itr(struct ixl_queue *que)
2873 struct ixl_vsi *vsi = que->vsi;
2874 struct i40e_hw *hw = vsi->hw;
2875 struct tx_ring *txr = &que->txr;
2881 /* Idle, do nothing */
2882 if (txr->bytes == 0)
2885 if (ixl_dynamic_tx_itr) {
/* Load metric: bytes seen per ITR tick since the last adjustment */
2886 tx_bytes = txr->bytes/txr->itr;
2889 switch (txr->latency) {
2890 case IXL_LOW_LATENCY:
2891 if (tx_bytes > 10) {
2892 tx_latency = IXL_AVE_LATENCY;
2893 tx_itr = IXL_ITR_20K;
2896 case IXL_AVE_LATENCY:
2897 if (tx_bytes > 20) {
2898 tx_latency = IXL_BULK_LATENCY;
2899 tx_itr = IXL_ITR_8K;
2900 } else if (tx_bytes <= 10) {
2901 tx_latency = IXL_LOW_LATENCY;
2902 tx_itr = IXL_ITR_100K;
2905 case IXL_BULK_LATENCY:
2906 if (tx_bytes <= 20) {
2907 tx_latency = IXL_AVE_LATENCY;
2908 tx_itr = IXL_ITR_20K;
2913 txr->latency = tx_latency;
2915 if (tx_itr != txr->itr) {
2916 /* do an exponential smoothing */
2917 tx_itr = (10 * tx_itr * txr->itr) /
2918 ((9 * tx_itr) + txr->itr);
2919 txr->itr = tx_itr & IXL_MAX_ITR;
2920 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2921 que->me), txr->itr);
2924 } else { /* We may have have toggled to non-dynamic */
2925 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2926 vsi->tx_itr_setting = ixl_tx_itr;
2927 /* Update the hardware if needed */
2928 if (txr->itr != vsi->tx_itr_setting) {
2929 txr->itr = vsi->tx_itr_setting;
2930 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2931 que->me), txr->itr);
/*
** ixl_add_hw_stats: register the driver's sysctl statistics tree.
** Builds driver-level counters, a "vsi" node with ethernet stats,
** one "que%d" node per queue with TX/RX counters, and finally the
** MAC-level port statistics.
** NOTE(review): return type and closing braces are missing from this
** sampled view; code lines kept byte-identical.
*/
2941 ixl_add_hw_stats(struct ixl_pf *pf)
2943 device_t dev = pf->dev;
2944 struct ixl_vsi *vsi = &pf->vsi;
2945 struct ixl_queue *queues = vsi->queues;
2946 struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2947 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2949 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2950 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2951 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2953 struct sysctl_oid *vsi_node, *queue_node;
2954 struct sysctl_oid_list *vsi_list, *queue_list;
2956 struct tx_ring *txr;
2957 struct rx_ring *rxr;
2959 /* Driver statistics */
2960 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2961 CTLFLAG_RD, &pf->watchdog_events,
2962 "Watchdog timeouts");
2963 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2964 CTLFLAG_RD, &pf->admin_irq,
2965 "Admin Queue IRQ Handled");
2967 /* VSI statistics */
2968 #define QUEUE_NAME_LEN 32
2969 char queue_namebuf[QUEUE_NAME_LEN];
2971 // ERJ: Only one vsi now, re-do when >1 VSI enabled
2972 // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2973 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2974 CTLFLAG_RD, NULL, "VSI-specific stats");
2975 vsi_list = SYSCTL_CHILDREN(vsi_node);
2977 ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2979 /* Queue statistics */
2980 for (int q = 0; q < vsi->num_queues; q++) {
2981 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2982 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2983 CTLFLAG_RD, NULL, "Queue #");
2984 queue_list = SYSCTL_CHILDREN(queue_node);
2986 txr = &(queues[q].txr);
2987 rxr = &(queues[q].rxr);
2989 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2990 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2991 "m_defrag() failed");
2992 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2993 CTLFLAG_RD, &(queues[q].dropped_pkts),
2994 "Driver dropped packets");
2995 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2996 CTLFLAG_RD, &(queues[q].irqs),
2997 "irqs on this queue");
2998 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2999 CTLFLAG_RD, &(queues[q].tso),
3001 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3002 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3003 "Driver tx dma failure in xmit");
3004 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3005 CTLFLAG_RD, &(txr->no_desc),
3006 "Queue No Descriptor Available");
3007 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3008 CTLFLAG_RD, &(txr->total_packets),
3009 "Queue Packets Transmitted");
3010 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3011 CTLFLAG_RD, &(txr->tx_bytes),
3012 "Queue Bytes Transmitted");
3013 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3014 CTLFLAG_RD, &(rxr->rx_packets),
3015 "Queue Packets Received");
3016 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3017 CTLFLAG_RD, &(rxr->rx_bytes),
3018 "Queue Bytes Received");
/* MAC/port-level statistics under a separate "mac" node */
3022 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
** ixl_add_sysctls_eth_stats: attach read-only UQUAD sysctls for each
** field of an i40e_eth_stats block under the given sysctl list.
** The ctls[] table is walked until its (implied) NULL terminator.
** NOTE(review): return type, braces and the loop increment are missing
** from this sampled view.
*/
3026 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3027 struct sysctl_oid_list *child,
3028 struct i40e_eth_stats *eth_stats)
3030 struct ixl_sysctl_info ctls[] =
3032 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3033 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3034 "Unicast Packets Received"},
3035 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3036 "Multicast Packets Received"},
3037 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3038 "Broadcast Packets Received"},
3039 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3040 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3041 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3042 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3043 "Multicast Packets Transmitted"},
3044 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3045 "Broadcast Packets Transmitted"},
3046 {&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3051 struct ixl_sysctl_info *entry = ctls;
3052 while (entry->stat != 0)
3054 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3055 CTLFLAG_RD, entry->stat,
3056 entry->description);
/*
** ixl_add_sysctls_mac_stats: create the "mac" sysctl node, attach the
** embedded ethernet stats, then attach one read-only UQUAD per
** port-level counter (errors, size histograms, flow control).
** NOTE(review): return type, braces and the loop increment are missing
** from this sampled view.
*/
3062 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3063 struct sysctl_oid_list *child,
3064 struct i40e_hw_port_stats *stats)
3066 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3067 CTLFLAG_RD, NULL, "Mac Statistics");
3068 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3070 struct i40e_eth_stats *eth_stats = &stats->eth;
3071 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3073 struct ixl_sysctl_info ctls[] =
3075 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3076 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3077 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3078 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3079 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3080 /* Packet Reception Stats */
3081 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3082 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3083 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3084 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3085 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3086 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3087 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3088 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3089 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3090 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3091 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3092 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3093 /* Packet Transmission Stats */
3094 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3095 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3096 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3097 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3098 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3099 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3100 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
/* Flow control counters */
3102 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3103 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3104 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3105 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3110 struct ixl_sysctl_info *entry = ctls;
3111 while (entry->stat != 0)
3113 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3114 CTLFLAG_RD, entry->stat,
3115 entry->description);
/*
3121 ** ixl_config_rss - setup RSS
3122 ** - note this is done for the single vsi
**
** Programs the RSS hash key registers, enables the PCTYPE hash set
** for IPv4/IPv6 TCP/UDP/SCTP/other/frag and L2 payload, and fills
** the lookup table (LUT) with queue indices in round-robin order.
** NOTE(review): declarations of i/j/lut/hena/set_hena, the `j = 0`
** wrap and the `if ((i & 3) == 3)` guard are missing from this
** sampled view.
*/
3124 static void ixl_config_rss(struct ixl_vsi *vsi)
3126 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3127 struct i40e_hw *hw = vsi->hw;
/* Fixed (non-random) hash seed */
3132 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3133 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3134 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3135 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3137 /* Fill out hash function seed */
3138 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3139 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3141 /* Enable PCTYPES for RSS: */
3143 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3144 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3145 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3146 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3147 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3148 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3149 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3150 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3151 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3152 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3153 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
/* HENA is a 64-bit enable set split across two 32-bit registers */
3155 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3156 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3158 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3159 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3161 /* Populate the LUT with max no. of queues in round robin fashion */
3162 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3163 if (j == vsi->num_queues)
3165 /* lut = 4-byte sliding window of 4 lut entries */
3166 lut = (lut << 8) | (j &
3167 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3168 /* On i = 3, we have 4 entries in lut; write to the register */
3170 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
/*
3177 ** This routine is run via an vlan config EVENT,
3178 ** it enables us to use the HW Filter table since
3179 ** we can get the vlan id. This just creates the
3180 ** entry in the soft version of the VFTA, init will
3181 ** repopulate the real table.
**
** Ignores events for other interfaces and invalid tags, then adds a
** MAC+VLAN filter for our own MAC address.
** NOTE(review): return type, locking and num_vlans bookkeeping lines
** are missing from this sampled view.
*/
3184 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3186 struct ixl_vsi *vsi = ifp->if_softc;
3187 struct i40e_hw *hw = vsi->hw;
3188 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3190 if (ifp->if_softc != arg) /* Not our event */
3193 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3198 ixl_add_filter(vsi, hw->mac.addr, vtag);
/*
3203 ** This routine is run via an vlan
3204 ** unconfig EVENT, remove our entry
3205 ** in the soft vfta.
**
** Mirror of ixl_register_vlan(): validates the event and tag, then
** removes the MAC+VLAN filter for our MAC address.
*/
3208 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3210 struct ixl_vsi *vsi = ifp->if_softc;
3211 struct i40e_hw *hw = vsi->hw;
3212 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3214 if (ifp->if_softc != arg)
3217 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3222 ixl_del_filter(vsi, hw->mac.addr, vtag);
/*
3227 ** This routine updates vlan filters, called by init
3228 ** it scans the filter table and then updates the hw
3229 ** after a soft reset.
**
** If any VLANs are configured, marks the soft-list VLAN entries for
** addition and pushes them to hardware in one AQ batch.
** NOTE(review): the flag-marking/count lines inside the loop are
** missing from this sampled view.
*/
3232 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3234 struct ixl_mac_filter *f;
/* Nothing to do when no VLANs are registered */
3237 if (vsi->num_vlans == 0)
/*
3240 ** Scan the filter list for vlan entries,
3241 ** mark them for addition and then call
3242 ** for the AQ update.
*/
3244 SLIST_FOREACH(f, &vsi->ftl, next) {
3245 if (f->flags & IXL_FILTER_VLAN) {
3253 printf("setup vlan: no filters found!\n");
3256 flags = IXL_FILTER_VLAN;
3257 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3258 ixl_add_hw_filters(vsi, flags, cnt);
/*
3263 ** Initialize filter list and add filters that the hardware
3264 ** needs to know about.
**
** Currently only installs the Ethernet broadcast address with no
** VLAN restriction (IXL_VLAN_ANY).
*/
3267 ixl_init_filters(struct ixl_vsi *vsi)
3269 /* Add broadcast address */
3270 u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3271 ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
/*
3275 ** This routine adds mulicast filters
**
** Adds a soft-list entry for `macaddr` with VLAN wildcard unless one
** already exists; the hardware update happens in a later batch call.
** NOTE(review): early-return on existing filter and the MC flag bit
** are missing from this sampled view.
*/
3278 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3280 struct ixl_mac_filter *f;
3282 /* Does one already exist */
3283 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3287 f = ixl_get_filter(vsi)
3289 printf("WARNING: no filter available!!\n");
3292 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3293 f->vlan = IXL_VLAN_ANY;
3294 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/*
3301 ** This routine adds macvlan filters
**
** Adds a MAC+VLAN filter to the soft list and immediately pushes it
** to hardware. When the first real VLAN is registered, the wildcard
** (IXL_VLAN_ANY) filter for the MAC is swapped for a VLAN-0 filter.
** NOTE(review): early-return on duplicate and the f->vlan assignment
** are missing from this sampled view.
*/
3304 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3306 struct ixl_mac_filter *f, *tmp;
3307 device_t dev = vsi->dev;
3309 DEBUGOUT("ixl_add_filter: begin");
3311 /* Does one already exist */
3312 f = ixl_find_filter(vsi, macaddr, vlan);
/*
3316 ** Is this the first vlan being registered, if so we
3317 ** need to remove the ANY filter that indicates we are
3318 ** not in a vlan, and replace that with a 0 filter.
*/
3320 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3321 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3323 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3324 ixl_add_filter(vsi, macaddr, 0);
3328 f = ixl_get_filter(vsi);
3330 device_printf(dev, "WARNING: no filter available!!\n");
3333 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3335 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3336 if (f->vlan != IXL_VLAN_ANY)
3337 f->flags |= IXL_FILTER_VLAN;
/* Push this single filter to hardware right away */
3339 ixl_add_hw_filters(vsi, f->flags, 1);
/*
** ixl_del_filter: mark the matching MAC+VLAN filter for deletion and
** remove it from hardware. When the last VLAN is removed, swap the
** VLAN-0 filter back to the wildcard (IXL_VLAN_ANY) filter.
** NOTE(review): return type and early-return when no filter matches
** are missing from this sampled view.
*/
3344 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3346 struct ixl_mac_filter *f;
3348 f = ixl_find_filter(vsi, macaddr, vlan);
3352 f->flags |= IXL_FILTER_DEL;
3353 ixl_del_hw_filters(vsi, 1);
3355 /* Check if this is the last vlan removal */
3356 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3357 /* Switch back to a non-vlan filter */
3358 ixl_del_filter(vsi, macaddr, 0);
3359 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3365 ** Find the filter with both matching mac addr and vlan id
3367 static struct ixl_mac_filter *
3368 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3370 struct ixl_mac_filter *f;
3373 SLIST_FOREACH(f, &vsi->ftl, next) {
3374 if (!cmp_etheraddr(f->macaddr, macaddr))
3376 if (f->vlan == vlan) {
/*
3388 ** This routine takes additions to the vsi filter
3389 ** table and creates an Admin Queue call to create
3390 ** the filters in the hardware.
**
** Collects up to `cnt` soft-list entries whose flags equal `flags`
** into an AQ element array, clears their ADD bit, and submits one
** i40e_aq_add_macvlan() call. On success the VSI add counter is
** bumped by the number of filters sent.
** NOTE(review): declarations of err/j, NULL check on malloc, and the
** free() of `a` are missing from this sampled view.
*/
3393 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3395 struct i40e_aqc_add_macvlan_element_data *a, *b;
3396 struct ixl_mac_filter *f;
3397 struct i40e_hw *hw = vsi->hw;
3398 device_t dev = vsi->dev;
3401 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3402 M_DEVBUF, M_NOWAIT | M_ZERO);
3404 device_printf(dev, "add hw filter failed to get memory\n");
/*
3409 ** Scan the filter list, each time we find one
3410 ** we add it to the admin queue array and turn off
** the ADD flag so it is not re-submitted later.
*/
3413 SLIST_FOREACH(f, &vsi->ftl, next) {
3414 if (f->flags == flags) {
3415 b = &a[j]; // a pox on fvl long names :)
3416 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* Wildcard VLAN is sent to firmware as tag 0 */
3418 (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3419 b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3420 f->flags &= ~IXL_FILTER_ADD;
3427 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3429 device_printf(dev, "aq_add_macvlan failure %d\n",
3430 hw->aq.asq_last_status);
3432 vsi->hw_filters_add += j;
/*
3439 ** This routine takes removals in the vsi filter
3440 ** table and creates an Admin Queue call to delete
3441 ** the filters in the hardware.
**
** Gathers up to `cnt` soft-list entries flagged IXL_FILTER_DEL into
** an AQ element array, unlinks them from the list, and submits one
** i40e_aq_remove_macvlan() call. ENOENT from firmware is tolerated;
** on other errors the per-element error codes decide how many
** deletions actually succeeded.
** NOTE(review): declarations of err/j/sc, free() of both the array
** and the unlinked filter entries are missing from this sampled view.
*/
3444 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3446 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3447 struct i40e_hw *hw = vsi->hw;
3448 device_t dev = vsi->dev;
3449 struct ixl_mac_filter *f, *f_temp;
3452 DEBUGOUT("ixl_del_hw_filters: begin\n");
3454 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3455 M_DEVBUF, M_NOWAIT | M_ZERO);
3457 printf("del hw filter failed to get memory\n");
/* SAFE variant: entries are unlinked while iterating */
3461 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3462 if (f->flags & IXL_FILTER_DEL) {
3463 e = &d[j]; // a pox on fvl long names :)
3464 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3465 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3466 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3467 /* delete entry from vsi list */
3468 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3476 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3477 /* NOTE: returns ENOENT every time but seems to work fine,
3478 so we'll ignore that specific error. */
3479 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
/* Count per-element successes to report how many failed */
3481 for (int i = 0; i < j; i++)
3482 sc += (!d[i].error_code);
3483 vsi->hw_filters_del += sc;
3485 "Failed to remove %d/%d filters, aq error %d\n",
3486 j - sc, j, hw->aq.asq_last_status);
3488 vsi->hw_filters_del += j;
3492 DEBUGOUT("ixl_del_hw_filters: end\n");
/*
** ixl_enable_rings: request TX and RX queue enable for every queue of
** the VSI and poll (up to 10 x 10ms) for the hardware status bit to
** latch, warning if a queue fails to come up.
** NOTE(review): return type, braces and `break`s are missing from
** this sampled view.
*/
3498 ixl_enable_rings(struct ixl_vsi *vsi)
3500 struct i40e_hw *hw = vsi->hw;
3503 for (int i = 0; i < vsi->num_queues; i++) {
/* TRUE => prepare the TX queue for being enabled */
3504 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3506 reg = rd32(hw, I40E_QTX_ENA(i));
3507 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3508 I40E_QTX_ENA_QENA_STAT_MASK;
3509 wr32(hw, I40E_QTX_ENA(i), reg);
3510 /* Verify the enable took */
3511 for (int j = 0; j < 10; j++) {
3512 reg = rd32(hw, I40E_QTX_ENA(i));
3513 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3515 i40e_msec_delay(10);
3517 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3518 printf("TX queue %d disabled!\n", i);
/* Same request/poll sequence for the RX side */
3520 reg = rd32(hw, I40E_QRX_ENA(i));
3521 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3522 I40E_QRX_ENA_QENA_STAT_MASK;
3523 wr32(hw, I40E_QRX_ENA(i), reg);
3524 /* Verify the enable took */
3525 for (int j = 0; j < 10; j++) {
3526 reg = rd32(hw, I40E_QRX_ENA(i));
3527 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3529 i40e_msec_delay(10);
3531 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3532 printf("RX queue %d disabled!\n", i);
/*
** ixl_disable_rings: clear the enable-request bit on every TX and RX
** queue and poll (up to 10 x 10ms) for the status bit to drop,
** warning if a queue stays enabled.
** NOTE(review): return type, braces and `break`s are missing from
** this sampled view.
*/
3537 ixl_disable_rings(struct ixl_vsi *vsi)
3539 struct i40e_hw *hw = vsi->hw;
3542 for (int i = 0; i < vsi->num_queues; i++) {
/* FALSE => prepare the TX queue for being disabled */
3543 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3544 i40e_usec_delay(500);
3546 reg = rd32(hw, I40E_QTX_ENA(i));
3547 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3548 wr32(hw, I40E_QTX_ENA(i), reg);
3549 /* Verify the disable took */
3550 for (int j = 0; j < 10; j++) {
3551 reg = rd32(hw, I40E_QTX_ENA(i));
3552 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3554 i40e_msec_delay(10);
3556 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3557 printf("TX queue %d still enabled!\n", i);
/* Same request/poll sequence for the RX side */
3559 reg = rd32(hw, I40E_QRX_ENA(i));
3560 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3561 wr32(hw, I40E_QRX_ENA(i), reg);
3562 /* Verify the disable took */
3563 for (int j = 0; j < 10; j++) {
3564 reg = rd32(hw, I40E_QRX_ENA(i));
3565 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3567 i40e_msec_delay(10);
3569 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3570 printf("RX queue %d still enabled!\n", i);
/*
3575 * ixl_handle_mdd_event
3577 * Called from interrupt handler to identify possibly malicious vfs
3578 * (But also detects events from the PF, as well)
*
* Reads the global MDD TX/RX detect registers, decodes and logs the
* offending function/queue/event, clears the latches, checks the
* PF-specific detect registers, and finally re-arms the MDD cause in
* the interrupt enable register.
* NOTE(review): some device_printf prefixes and braces are missing
* from this sampled view.
*/
3580 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3582 struct i40e_hw *hw = &pf->hw;
3583 device_t dev = pf->dev;
3584 bool mdd_detected = false;
3585 bool pf_mdd_detected = false;
3588 /* find what triggered the MDD event */
3589 reg = rd32(hw, I40E_GL_MDET_TX);
3590 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3591 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3592 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3593 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3594 I40E_GL_MDET_TX_EVENT_SHIFT;
3595 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3596 I40E_GL_MDET_TX_QUEUE_SHIFT;
3598 "Malicious Driver Detection event 0x%02x"
3599 " on TX queue %d pf number 0x%02x\n",
3600 event, queue, pf_num);
/* Write-1-to-clear the TX detect latch */
3601 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3602 mdd_detected = true;
3604 reg = rd32(hw, I40E_GL_MDET_RX);
3605 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3606 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3607 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3608 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3609 I40E_GL_MDET_RX_EVENT_SHIFT;
3610 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3611 I40E_GL_MDET_RX_QUEUE_SHIFT;
3613 "Malicious Driver Detection event 0x%02x"
3614 " on RX queue %d of function 0x%02x\n",
3615 event, queue, func);
/* Write-1-to-clear the RX detect latch */
3616 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3617 mdd_detected = true;
/* Was this PF itself the source of the event? */
3621 reg = rd32(hw, I40E_PF_MDET_TX);
3622 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3623 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3625 "MDD TX event is for this function 0x%08x",
3627 pf_mdd_detected = true;
3629 reg = rd32(hw, I40E_PF_MDET_RX);
3630 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3631 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3633 "MDD RX event is for this function 0x%08x",
3635 pf_mdd_detected = true;
3639 /* re-enable mdd interrupt cause */
3640 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3641 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3642 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3647 ixl_enable_intr(struct ixl_vsi *vsi)
3649 struct i40e_hw *hw = vsi->hw;
3650 struct ixl_queue *que = vsi->queues;
3652 if (ixl_enable_msix) {
3653 ixl_enable_adminq(hw);
3654 for (int i = 0; i < vsi->num_queues; i++, que++)
3655 ixl_enable_queue(hw, que->me);
3657 ixl_enable_legacy(hw);
3661 ixl_disable_intr(struct ixl_vsi *vsi)
3663 struct i40e_hw *hw = vsi->hw;
3664 struct ixl_queue *que = vsi->queues;
3666 if (ixl_enable_msix) {
3667 ixl_disable_adminq(hw);
3668 for (int i = 0; i < vsi->num_queues; i++, que++)
3669 ixl_disable_queue(hw, que->me);
3671 ixl_disable_legacy(hw);
3675 ixl_enable_adminq(struct i40e_hw *hw)
3679 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3680 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3681 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3682 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3688 ixl_disable_adminq(struct i40e_hw *hw)
3692 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3693 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3699 ixl_enable_queue(struct i40e_hw *hw, int id)
3703 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3704 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3705 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3706 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3710 ixl_disable_queue(struct i40e_hw *hw, int id)
3714 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3715 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3721 ixl_enable_legacy(struct i40e_hw *hw)
3724 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3725 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3726 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3727 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3731 ixl_disable_legacy(struct i40e_hw *hw)
3735 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3736 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3742 ixl_update_stats_counters(struct ixl_pf *pf)
3744 struct i40e_hw *hw = &pf->hw;
3745 struct ixl_vsi *vsi = &pf->vsi;
3747 struct i40e_hw_port_stats *nsd = &pf->stats;
3748 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3750 /* Update hw stats */
3751 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3752 pf->stat_offsets_loaded,
3753 &osd->crc_errors, &nsd->crc_errors);
3754 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3755 pf->stat_offsets_loaded,
3756 &osd->illegal_bytes, &nsd->illegal_bytes);
3757 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3758 I40E_GLPRT_GORCL(hw->port),
3759 pf->stat_offsets_loaded,
3760 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3761 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3762 I40E_GLPRT_GOTCL(hw->port),
3763 pf->stat_offsets_loaded,
3764 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3765 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3766 pf->stat_offsets_loaded,
3767 &osd->eth.rx_discards,
3768 &nsd->eth.rx_discards);
3769 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3770 I40E_GLPRT_UPRCL(hw->port),
3771 pf->stat_offsets_loaded,
3772 &osd->eth.rx_unicast,
3773 &nsd->eth.rx_unicast);
3774 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3775 I40E_GLPRT_UPTCL(hw->port),
3776 pf->stat_offsets_loaded,
3777 &osd->eth.tx_unicast,
3778 &nsd->eth.tx_unicast);
3779 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3780 I40E_GLPRT_MPRCL(hw->port),
3781 pf->stat_offsets_loaded,
3782 &osd->eth.rx_multicast,
3783 &nsd->eth.rx_multicast);
3784 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3785 I40E_GLPRT_MPTCL(hw->port),
3786 pf->stat_offsets_loaded,
3787 &osd->eth.tx_multicast,
3788 &nsd->eth.tx_multicast);
3789 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3790 I40E_GLPRT_BPRCL(hw->port),
3791 pf->stat_offsets_loaded,
3792 &osd->eth.rx_broadcast,
3793 &nsd->eth.rx_broadcast);
3794 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3795 I40E_GLPRT_BPTCL(hw->port),
3796 pf->stat_offsets_loaded,
3797 &osd->eth.tx_broadcast,
3798 &nsd->eth.tx_broadcast);
3800 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3801 pf->stat_offsets_loaded,
3802 &osd->tx_dropped_link_down,
3803 &nsd->tx_dropped_link_down);
3804 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3805 pf->stat_offsets_loaded,
3806 &osd->mac_local_faults,
3807 &nsd->mac_local_faults);
3808 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3809 pf->stat_offsets_loaded,
3810 &osd->mac_remote_faults,
3811 &nsd->mac_remote_faults);
3812 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3813 pf->stat_offsets_loaded,
3814 &osd->rx_length_errors,
3815 &nsd->rx_length_errors);
3817 /* Flow control (LFC) stats */
3818 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3819 pf->stat_offsets_loaded,
3820 &osd->link_xon_rx, &nsd->link_xon_rx);
3821 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3822 pf->stat_offsets_loaded,
3823 &osd->link_xon_tx, &nsd->link_xon_tx);
3824 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3825 pf->stat_offsets_loaded,
3826 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3827 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3828 pf->stat_offsets_loaded,
3829 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3831 /* Priority flow control stats */
3833 for (int i = 0; i < 8; i++) {
3834 ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3835 pf->stat_offsets_loaded,
3836 &osd->priority_xon_rx[i],
3837 &nsd->priority_xon_rx[i]);
3838 ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3839 pf->stat_offsets_loaded,
3840 &osd->priority_xon_tx[i],
3841 &nsd->priority_xon_tx[i]);
3842 ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3843 pf->stat_offsets_loaded,
3844 &osd->priority_xoff_tx[i],
3845 &nsd->priority_xoff_tx[i]);
3846 ixl_stat_update32(hw,
3847 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3848 pf->stat_offsets_loaded,
3849 &osd->priority_xon_2_xoff[i],
3850 &nsd->priority_xon_2_xoff[i]);
3854 /* Packet size stats rx */
3855 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3856 I40E_GLPRT_PRC64L(hw->port),
3857 pf->stat_offsets_loaded,
3858 &osd->rx_size_64, &nsd->rx_size_64);
3859 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3860 I40E_GLPRT_PRC127L(hw->port),
3861 pf->stat_offsets_loaded,
3862 &osd->rx_size_127, &nsd->rx_size_127);
3863 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3864 I40E_GLPRT_PRC255L(hw->port),
3865 pf->stat_offsets_loaded,
3866 &osd->rx_size_255, &nsd->rx_size_255);
3867 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3868 I40E_GLPRT_PRC511L(hw->port),
3869 pf->stat_offsets_loaded,
3870 &osd->rx_size_511, &nsd->rx_size_511);
3871 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3872 I40E_GLPRT_PRC1023L(hw->port),
3873 pf->stat_offsets_loaded,
3874 &osd->rx_size_1023, &nsd->rx_size_1023);
3875 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3876 I40E_GLPRT_PRC1522L(hw->port),
3877 pf->stat_offsets_loaded,
3878 &osd->rx_size_1522, &nsd->rx_size_1522);
3879 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3880 I40E_GLPRT_PRC9522L(hw->port),
3881 pf->stat_offsets_loaded,
3882 &osd->rx_size_big, &nsd->rx_size_big);
3884 /* Packet size stats tx */
3885 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3886 I40E_GLPRT_PTC64L(hw->port),
3887 pf->stat_offsets_loaded,
3888 &osd->tx_size_64, &nsd->tx_size_64);
3889 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3890 I40E_GLPRT_PTC127L(hw->port),
3891 pf->stat_offsets_loaded,
3892 &osd->tx_size_127, &nsd->tx_size_127);
3893 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3894 I40E_GLPRT_PTC255L(hw->port),
3895 pf->stat_offsets_loaded,
3896 &osd->tx_size_255, &nsd->tx_size_255);
3897 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3898 I40E_GLPRT_PTC511L(hw->port),
3899 pf->stat_offsets_loaded,
3900 &osd->tx_size_511, &nsd->tx_size_511);
3901 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3902 I40E_GLPRT_PTC1023L(hw->port),
3903 pf->stat_offsets_loaded,
3904 &osd->tx_size_1023, &nsd->tx_size_1023);
3905 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3906 I40E_GLPRT_PTC1522L(hw->port),
3907 pf->stat_offsets_loaded,
3908 &osd->tx_size_1522, &nsd->tx_size_1522);
3909 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3910 I40E_GLPRT_PTC9522L(hw->port),
3911 pf->stat_offsets_loaded,
3912 &osd->tx_size_big, &nsd->tx_size_big);
3914 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3915 pf->stat_offsets_loaded,
3916 &osd->rx_undersize, &nsd->rx_undersize);
3917 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3918 pf->stat_offsets_loaded,
3919 &osd->rx_fragments, &nsd->rx_fragments);
3920 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3921 pf->stat_offsets_loaded,
3922 &osd->rx_oversize, &nsd->rx_oversize);
3923 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3924 pf->stat_offsets_loaded,
3925 &osd->rx_jabber, &nsd->rx_jabber);
3926 pf->stat_offsets_loaded = true;
3929 /* Update vsi stats */
3930 ixl_update_eth_stats(vsi);
3933 // ERJ - these are per-port, update all vsis?
3934 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
3938 ** Taskqueue handler for MSIX Adminq interrupts
3939 ** - do outside interrupt since it might sleep
3942 ixl_do_adminq(void *context, int pending)
3944 struct ixl_pf *pf = context;
3945 struct i40e_hw *hw = &pf->hw;
3946 struct ixl_vsi *vsi = &pf->vsi;
3947 struct i40e_arq_event_info event;
3952 event.buf_len = IXL_AQ_BUF_SZ;
3953 event.msg_buf = malloc(event.buf_len,
3954 M_DEVBUF, M_NOWAIT | M_ZERO);
3955 if (!event.msg_buf) {
3956 printf("Unable to allocate adminq memory\n");
3960 /* clean and process any events */
3962 ret = i40e_clean_arq_element(hw, &event, &result);
3965 opcode = LE16_TO_CPU(event.desc.opcode);
3967 case i40e_aqc_opc_get_link_status:
3968 vsi->link_up = ixl_config_link(hw);
3969 ixl_update_link_status(pf);
3971 case i40e_aqc_opc_send_msg_to_pf:
3972 /* process pf/vf communication here */
3974 case i40e_aqc_opc_event_lan_overflow:
3978 printf("AdminQ unknown event %x\n", opcode);
3983 } while (result && (loop++ < IXL_ADM_LIMIT));
3985 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3986 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3987 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3988 free(event.msg_buf, M_DEVBUF);
3991 ixl_enable_adminq(&pf->hw);
3993 ixl_enable_intr(vsi);
3997 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4000 int error, input = 0;
4002 error = sysctl_handle_int(oidp, &input, 0, req);
4004 if (error || !req->newptr)
4008 pf = (struct ixl_pf *)arg1;
4009 ixl_print_debug_info(pf);
4016 ixl_print_debug_info(struct ixl_pf *pf)
4018 struct i40e_hw *hw = &pf->hw;
4019 struct ixl_vsi *vsi = &pf->vsi;
4020 struct ixl_queue *que = vsi->queues;
4021 struct rx_ring *rxr = &que->rxr;
4022 struct tx_ring *txr = &que->txr;
4026 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4027 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4028 printf("RX next check = %x\n", rxr->next_check);
4029 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4030 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4031 printf("TX desc avail = %x\n", txr->avail);
4033 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4034 printf("RX Bytes = %x\n", reg);
4035 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4036 printf("Port RX Bytes = %x\n", reg);
4037 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4038 printf("RX discard = %x\n", reg);
4039 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4040 printf("Port RX discard = %x\n", reg);
4042 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4043 printf("TX errors = %x\n", reg);
4044 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4045 printf("TX Bytes = %x\n", reg);
4047 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4048 printf("RX undersize = %x\n", reg);
4049 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4050 printf("RX fragments = %x\n", reg);
4051 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4052 printf("RX oversize = %x\n", reg);
4053 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4054 printf("RX length error = %x\n", reg);
4055 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4056 printf("mac remote fault = %x\n", reg);
4057 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4058 printf("mac local fault = %x\n", reg);
4062 * Update VSI-specific ethernet statistics counters.
4064 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4066 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4067 struct i40e_hw *hw = &pf->hw;
4068 struct i40e_eth_stats *es;
4069 struct i40e_eth_stats *oes;
4071 uint64_t tx_discards;
4072 struct i40e_hw_port_stats *nsd;
4073 u16 stat_idx = vsi->info.stat_counter_idx;
4075 es = &vsi->eth_stats;
4076 oes = &vsi->eth_stats_offsets;
4079 /* Gather up the stats that the hw collects */
4080 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4081 vsi->stat_offsets_loaded,
4082 &oes->tx_errors, &es->tx_errors);
4083 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4084 vsi->stat_offsets_loaded,
4085 &oes->rx_discards, &es->rx_discards);
4087 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4088 I40E_GLV_GORCL(stat_idx),
4089 vsi->stat_offsets_loaded,
4090 &oes->rx_bytes, &es->rx_bytes);
4091 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4092 I40E_GLV_UPRCL(stat_idx),
4093 vsi->stat_offsets_loaded,
4094 &oes->rx_unicast, &es->rx_unicast);
4095 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4096 I40E_GLV_MPRCL(stat_idx),
4097 vsi->stat_offsets_loaded,
4098 &oes->rx_multicast, &es->rx_multicast);
4099 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4100 I40E_GLV_BPRCL(stat_idx),
4101 vsi->stat_offsets_loaded,
4102 &oes->rx_broadcast, &es->rx_broadcast);
4104 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4105 I40E_GLV_GOTCL(stat_idx),
4106 vsi->stat_offsets_loaded,
4107 &oes->tx_bytes, &es->tx_bytes);
4108 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4109 I40E_GLV_UPTCL(stat_idx),
4110 vsi->stat_offsets_loaded,
4111 &oes->tx_unicast, &es->tx_unicast);
4112 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4113 I40E_GLV_MPTCL(stat_idx),
4114 vsi->stat_offsets_loaded,
4115 &oes->tx_multicast, &es->tx_multicast);
4116 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4117 I40E_GLV_BPTCL(stat_idx),
4118 vsi->stat_offsets_loaded,
4119 &oes->tx_broadcast, &es->tx_broadcast);
4120 vsi->stat_offsets_loaded = true;
4122 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4123 for (i = 0; i < vsi->num_queues; i++)
4124 tx_discards += vsi->queues[i].txr.br->br_drops;
4126 /* Update ifnet stats */
4127 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4130 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4133 IXL_SET_IBYTES(vsi, es->rx_bytes);
4134 IXL_SET_OBYTES(vsi, es->tx_bytes);
4135 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4136 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4138 IXL_SET_OERRORS(vsi, es->tx_errors);
4139 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4140 IXL_SET_OQDROPS(vsi, tx_discards);
4141 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4142 IXL_SET_COLLISIONS(vsi, 0);
4146 * Reset all of the stats for the given pf
4148 void ixl_pf_reset_stats(struct ixl_pf *pf)
4150 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4151 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4152 pf->stat_offsets_loaded = false;
4156 * Resets all stats of the given vsi
4158 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4160 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4161 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4162 vsi->stat_offsets_loaded = false;
4166 * Read and update a 48 bit stat from the hw
4168 * Since the device stats are not reset at PFReset, they likely will not
4169 * be zeroed when the driver starts. We'll save the first values read
4170 * and use them as offsets to be subtracted from the raw values in order
4171 * to report stats that count from zero.
4174 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4175 bool offset_loaded, u64 *offset, u64 *stat)
4179 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4180 new_data = rd64(hw, loreg);
4183 * Use two rd32's instead of one rd64; FreeBSD versions before
4184 * 10 don't support 8 byte bus reads/writes.
4186 new_data = rd32(hw, loreg);
4187 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4192 if (new_data >= *offset)
4193 *stat = new_data - *offset;
4195 *stat = (new_data + ((u64)1 << 48)) - *offset;
4196 *stat &= 0xFFFFFFFFFFFFULL;
4200 * Read and update a 32 bit stat from the hw
4203 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4204 bool offset_loaded, u64 *offset, u64 *stat)
4208 new_data = rd32(hw, reg);
4211 if (new_data >= *offset)
4212 *stat = (u32)(new_data - *offset);
4214 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4218 ** Set flow control using sysctl:
4225 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4228 * TODO: ensure flow control is disabled if
4229 * priority flow control is enabled
4231 * TODO: ensure tx CRC by hardware should be enabled
4232 * if tx flow control is enabled.
4234 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4235 struct i40e_hw *hw = &pf->hw;
4236 device_t dev = pf->dev;
4237 int requested_fc = 0, error = 0;
4238 enum i40e_status_code aq_error = 0;
4241 aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4244 "%s: Error retrieving link info from aq, %d\n",
4245 __func__, aq_error);
4249 /* Read in new mode */
4250 requested_fc = hw->fc.current_mode;
4251 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4252 if ((error) || (req->newptr == NULL))
4254 if (requested_fc < 0 || requested_fc > 3) {
4256 "Invalid fc mode; valid modes are 0 through 3\n");
4261 ** Changing flow control mode currently does not work on
4264 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4265 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4266 device_printf(dev, "Changing flow control mode unsupported"
4267 " on 40GBase-CR4 media.\n");
4271 /* Set fc ability for port */
4272 hw->fc.requested_mode = requested_fc;
4273 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4276 "%s: Error setting new fc mode %d; fc_err %#x\n",
4277 __func__, aq_error, fc_aq_err);
4281 if (hw->fc.current_mode != hw->fc.requested_mode) {
4282 device_printf(dev, "%s: FC set failure:\n", __func__);
4283 device_printf(dev, "%s: Current: %s / Requested: %s\n",
4285 ixl_fc_string[hw->fc.current_mode],
4286 ixl_fc_string[hw->fc.requested_mode]);
4293 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4295 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4296 struct i40e_hw *hw = &pf->hw;
4297 int error = 0, index = 0;
4308 ixl_update_link_status(pf);
4310 switch (hw->phy.link_info.link_speed) {
4311 case I40E_LINK_SPEED_100MB:
4314 case I40E_LINK_SPEED_1GB:
4317 case I40E_LINK_SPEED_10GB:
4320 case I40E_LINK_SPEED_40GB:
4323 case I40E_LINK_SPEED_20GB:
4326 case I40E_LINK_SPEED_UNKNOWN:
4332 error = sysctl_handle_string(oidp, speeds[index],
4333 strlen(speeds[index]), req);
4338 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4340 struct i40e_hw *hw = &pf->hw;
4341 device_t dev = pf->dev;
4342 struct i40e_aq_get_phy_abilities_resp abilities;
4343 struct i40e_aq_set_phy_config config;
4344 enum i40e_status_code aq_error = 0;
4346 /* Get current capability information */
4347 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4349 device_printf(dev, "%s: Error getting phy capabilities %d,"
4350 " aq error: %d\n", __func__, aq_error,
4351 hw->aq.asq_last_status);
4355 /* Prepare new config */
4356 bzero(&config, sizeof(config));
4357 config.phy_type = abilities.phy_type;
4358 config.abilities = abilities.abilities
4359 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4360 config.eee_capability = abilities.eee_capability;
4361 config.eeer = abilities.eeer_val;
4362 config.low_power_ctrl = abilities.d3_lpan;
4363 /* Translate into aq cmd link_speed */
4365 config.link_speed |= I40E_LINK_SPEED_10GB;
4367 config.link_speed |= I40E_LINK_SPEED_1GB;
4369 config.link_speed |= I40E_LINK_SPEED_100MB;
4371 /* Do aq command & restart link */
4372 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4374 device_printf(dev, "%s: Error setting new phy config %d,"
4375 " aq error: %d\n", __func__, aq_error,
4376 hw->aq.asq_last_status);
4384 ** Control link advertise speed:
4386 ** 0x1 - advertise 100 Mb
4387 ** 0x2 - advertise 1G
4388 ** 0x4 - advertise 10G
4390 ** Does not work on 40G devices.
4393 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4395 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4396 struct i40e_hw *hw = &pf->hw;
4397 device_t dev = pf->dev;
4398 int requested_ls = 0;
4402 ** FW doesn't support changing advertised speed
4403 ** for 40G devices; speed is always 40G.
4405 if (i40e_is_40G_device(hw->device_id))
4408 /* Read in new mode */
4409 requested_ls = pf->advertised_speed;
4410 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4411 if ((error) || (req->newptr == NULL))
4413 if (requested_ls < 1 || requested_ls > 7) {
4415 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4419 /* Exit if no change */
4420 if (pf->advertised_speed == requested_ls)
4423 error = ixl_set_advertised_speeds(pf, requested_ls);
4427 pf->advertised_speed = requested_ls;
4428 ixl_update_link_status(pf);
4433 ** Get the width and transaction speed of
4434 ** the bus this adapter is plugged into.
4437 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4443 /* Get the PCI Express Capabilities offset */
4444 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4446 /* ...and read the Link Status Register */
4447 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4449 switch (link & I40E_PCI_LINK_WIDTH) {
4450 case I40E_PCI_LINK_WIDTH_1:
4451 hw->bus.width = i40e_bus_width_pcie_x1;
4453 case I40E_PCI_LINK_WIDTH_2:
4454 hw->bus.width = i40e_bus_width_pcie_x2;
4456 case I40E_PCI_LINK_WIDTH_4:
4457 hw->bus.width = i40e_bus_width_pcie_x4;
4459 case I40E_PCI_LINK_WIDTH_8:
4460 hw->bus.width = i40e_bus_width_pcie_x8;
4463 hw->bus.width = i40e_bus_width_unknown;
4467 switch (link & I40E_PCI_LINK_SPEED) {
4468 case I40E_PCI_LINK_SPEED_2500:
4469 hw->bus.speed = i40e_bus_speed_2500;
4471 case I40E_PCI_LINK_SPEED_5000:
4472 hw->bus.speed = i40e_bus_speed_5000;
4474 case I40E_PCI_LINK_SPEED_8000:
4475 hw->bus.speed = i40e_bus_speed_8000;
4478 hw->bus.speed = i40e_bus_speed_unknown;
4483 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4484 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4485 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4486 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4487 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4488 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4489 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4492 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4493 (hw->bus.speed < i40e_bus_speed_8000)) {
4494 device_printf(dev, "PCI-Express bandwidth available"
4495 " for this device\n is not sufficient for"
4496 " normal operation.\n");
4497 device_printf(dev, "For expected performance a x8 "
4498 "PCIE Gen3 slot is required.\n");
4505 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4507 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4508 struct i40e_hw *hw = &pf->hw;
4511 snprintf(buf, sizeof(buf),
4512 "f%d.%d a%d.%d n%02x.%02x e%08x",
4513 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4514 hw->aq.api_maj_ver, hw->aq.api_min_ver,
4515 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4516 IXL_NVM_VERSION_HI_SHIFT,
4517 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4518 IXL_NVM_VERSION_LO_SHIFT,
4520 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4526 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4528 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4529 struct i40e_hw *hw = &pf->hw;
4530 struct i40e_link_status link_status;
4533 enum i40e_status_code aq_error = 0;
4535 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4537 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4542 "PHY Type : %#04x\n"
4544 "Link info: %#04x\n"
4547 link_status.phy_type, link_status.link_speed,
4548 link_status.link_info, link_status.an_info,
4549 link_status.ext_info);
4551 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4555 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4557 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4558 struct i40e_hw *hw = &pf->hw;
4559 struct i40e_aq_get_phy_abilities_resp abilities_resp;
4562 enum i40e_status_code aq_error = 0;
4564 // TODO: Print out list of qualified modules as well?
4565 aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4567 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4572 "PHY Type : %#010x\n"
4574 "Abilities: %#04x\n"
4576 "EEER reg : %#010x\n"
4578 abilities_resp.phy_type, abilities_resp.link_speed,
4579 abilities_resp.abilities, abilities_resp.eee_capability,
4580 abilities_resp.eeer_val, abilities_resp.d3_lpan);
4582 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4586 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4588 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4589 struct ixl_vsi *vsi = &pf->vsi;
4590 struct ixl_mac_filter *f;
4595 int ftl_counter = 0;
4599 SLIST_FOREACH(f, &vsi->ftl, next) {
4604 sysctl_handle_string(oidp, "(none)", 6, req);
4608 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4609 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4611 sprintf(buf_i++, "\n");
4612 SLIST_FOREACH(f, &vsi->ftl, next) {
4614 MAC_FORMAT ", vlan %4d, flags %#06x",
4615 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4617 /* don't print '\n' for last entry */
4618 if (++ftl_counter != ftl_len) {
4619 sprintf(buf_i, "\n");
4624 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4626 printf("sysctl error: %d\n", error);
4627 free(buf, M_DEVBUF);
4631 #define IXL_SW_RES_SIZE 0x14
4633 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4635 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4636 struct i40e_hw *hw = &pf->hw;
4637 device_t dev = pf->dev;
4642 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4644 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4646 device_printf(dev, "Could not allocate sbuf for output.\n");
4650 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4655 device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4656 __func__, error, hw->aq.asq_last_status);
4660 device_printf(dev, "Num_entries: %d\n", num_entries);
4662 sbuf_cat(buf, "\n");
4664 "Type | Guaranteed | Total | Used | Un-allocated\n"
4665 " | (this) | (all) | (this) | (all) \n");
4666 for (int i = 0; i < num_entries; i++) {
4668 "%#4x | %10d %5d %6d %12d",
4669 resp[i].resource_type,
4673 resp[i].total_unalloced);
4674 if (i < num_entries - 1)
4675 sbuf_cat(buf, "\n");
4678 error = sbuf_finish(buf);
4680 device_printf(dev, "Error finishing sbuf: %d\n", error);
4685 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4687 device_printf(dev, "sysctl error: %d\n", error);
4693 ** Caller must init and delete sbuf; this function will clear and
4694 ** finish it for caller.
4697 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4701 if (seid == 0 && uplink)
4702 sbuf_cat(s, "Network");
4704 sbuf_cat(s, "Host");
4708 sbuf_printf(s, "MAC %d", seid - 2);
4709 else if (seid <= 15)
4710 sbuf_cat(s, "Reserved");
4711 else if (seid <= 31)
4712 sbuf_printf(s, "PF %d", seid - 16);
4713 else if (seid <= 159)
4714 sbuf_printf(s, "VF %d", seid - 32);
4715 else if (seid <= 287)
4716 sbuf_cat(s, "Reserved");
4717 else if (seid <= 511)
4718 sbuf_cat(s, "Other"); // for other structures
4719 else if (seid <= 895)
4720 sbuf_printf(s, "VSI %d", seid - 512);
4721 else if (seid <= 1023)
4722 sbuf_printf(s, "Reserved");
4724 sbuf_cat(s, "Invalid");
4727 return sbuf_data(s);
4731 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4733 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4734 struct i40e_hw *hw = &pf->hw;
4735 device_t dev = pf->dev;
4739 u8 aq_buf[I40E_AQ_LARGE_BUF];
4742 struct i40e_aqc_get_switch_config_resp *sw_config;
4743 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4745 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4747 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4751 error = i40e_aq_get_switch_config(hw, sw_config,
4752 sizeof(aq_buf), &next, NULL);
4754 device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4755 __func__, error, hw->aq.asq_last_status);
4760 nmbuf = sbuf_new_auto();
4762 device_printf(dev, "Could not allocate sbuf for name output.\n");
4766 sbuf_cat(buf, "\n");
4767 // Assuming <= 255 elements in switch
4768 sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4770 ** Revision -- all elements are revision 1 for now
4773 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
4774 " | | | (uplink)\n");
4775 for (int i = 0; i < sw_config->header.num_reported; i++) {
4776 // "%4d (%8s) | %8s %8s %#8x",
4777 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4779 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4780 sbuf_cat(buf, " | ");
4781 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4783 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4785 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4786 if (i < sw_config->header.num_reported - 1)
4787 sbuf_cat(buf, "\n");
4791 error = sbuf_finish(buf);
4793 device_printf(dev, "Error finishing sbuf: %d\n", error);
4798 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4800 device_printf(dev, "sysctl error: %d\n", error);
4807 ** Dump TX desc given index.
4808 ** Doesn't work; don't use.
4809 ** TODO: Also needs a queue index input!
4812 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4814 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4815 device_t dev = pf->dev;
4821 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4823 device_printf(dev, "Could not allocate sbuf for output.\n");
4828 error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4831 if (req->newptr == NULL)
4832 return (EIO); // fix
4833 if (desc_idx > 1024) { // fix
4835 "Invalid descriptor index, needs to be < 1024\n"); // fix
4839 // Don't use this sysctl yet
4843 sbuf_cat(buf, "\n");
4846 struct ixl_queue *que = pf->vsi.queues;
4847 struct tx_ring *txr = &(que[1].txr);
4848 struct i40e_tx_desc *txd = &txr->base[desc_idx];
4850 sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4851 sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4852 sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4854 error = sbuf_finish(buf);
4856 device_printf(dev, "Error finishing sbuf: %d\n", error);
4861 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4863 device_printf(dev, "sysctl error: %d\n", error);