1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
40 /*********************************************************************
42 *********************************************************************/
/* Human-readable driver version string. */
43 char ixl_driver_version[] = "1.2.2";
45 /*********************************************************************
48 * Used by probe to select devices to load on
49 * Last field stores an index into ixl_strings
50 * Last entry must be all 0s
52 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
53 *********************************************************************/
/*
 * PCI ID match table consulted by ixl_probe().  Columns are
 * { vendor, device, subvendor, subdevice, string index }; a 0 in the
 * subvendor/subdevice columns acts as a wildcard in the probe match,
 * and the final column indexes into ixl_strings[].
 */
55 static ixl_vendor_info_t ixl_vendor_info_array[] =
57 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
58 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
59 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
60 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
61 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
62 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
63 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
65 /* required last entry */
69 /*********************************************************************
70 * Table of branding strings
71 *********************************************************************/
/* Branding strings, selected by the index column of ixl_vendor_info_array. */
73 static char *ixl_strings[] = {
74 "Intel(R) Ethernet Connection XL710 Driver"
78 /*********************************************************************
80 *********************************************************************/
81 static int ixl_probe(device_t);
82 static int ixl_attach(device_t);
83 static int ixl_detach(device_t);
84 static int ixl_shutdown(device_t);
85 static int ixl_get_hw_capabilities(struct ixl_pf *);
86 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
87 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
88 static void ixl_init(void *);
89 static void ixl_init_locked(struct ixl_pf *);
90 static void ixl_stop(struct ixl_pf *);
91 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
92 static int ixl_media_change(struct ifnet *);
93 static void ixl_update_link_status(struct ixl_pf *);
94 static int ixl_allocate_pci_resources(struct ixl_pf *);
95 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
96 static int ixl_setup_stations(struct ixl_pf *);
97 static int ixl_setup_vsi(struct ixl_vsi *);
98 static int ixl_initialize_vsi(struct ixl_vsi *);
99 static int ixl_assign_vsi_msix(struct ixl_pf *);
100 static int ixl_assign_vsi_legacy(struct ixl_pf *);
101 static int ixl_init_msix(struct ixl_pf *);
102 static void ixl_configure_msix(struct ixl_pf *);
103 static void ixl_configure_itr(struct ixl_pf *);
104 static void ixl_configure_legacy(struct ixl_pf *);
105 static void ixl_free_pci_resources(struct ixl_pf *);
106 static void ixl_local_timer(void *);
107 static int ixl_setup_interface(device_t, struct ixl_vsi *);
108 static bool ixl_config_link(struct i40e_hw *);
109 static void ixl_config_rss(struct ixl_vsi *);
110 static void ixl_set_queue_rx_itr(struct ixl_queue *);
111 static void ixl_set_queue_tx_itr(struct ixl_queue *);
113 static void ixl_enable_rings(struct ixl_vsi *);
114 static void ixl_disable_rings(struct ixl_vsi *);
115 static void ixl_enable_intr(struct ixl_vsi *);
116 static void ixl_disable_intr(struct ixl_vsi *);
118 static void ixl_enable_adminq(struct i40e_hw *);
119 static void ixl_disable_adminq(struct i40e_hw *);
120 static void ixl_enable_queue(struct i40e_hw *, int);
121 static void ixl_disable_queue(struct i40e_hw *, int);
122 static void ixl_enable_legacy(struct i40e_hw *);
123 static void ixl_disable_legacy(struct i40e_hw *);
125 static void ixl_set_promisc(struct ixl_vsi *);
126 static void ixl_add_multi(struct ixl_vsi *);
127 static void ixl_del_multi(struct ixl_vsi *);
128 static void ixl_register_vlan(void *, struct ifnet *, u16);
129 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
130 static void ixl_setup_vlan_filters(struct ixl_vsi *);
132 static void ixl_init_filters(struct ixl_vsi *);
133 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
134 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
135 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
136 static void ixl_del_hw_filters(struct ixl_vsi *, int);
137 static struct ixl_mac_filter *
138 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
139 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
141 /* Sysctl debug interface */
142 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
143 static void ixl_print_debug_info(struct ixl_pf *);
145 /* The MSI/X Interrupt handlers */
146 static void ixl_intr(void *);
147 static void ixl_msix_que(void *);
148 static void ixl_msix_adminq(void *);
149 static void ixl_handle_mdd_event(struct ixl_pf *);
151 /* Deferred interrupt tasklets */
152 static void ixl_do_adminq(void *, int);
154 /* Sysctl handlers */
155 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
156 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
157 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
160 static void ixl_add_hw_stats(struct ixl_pf *);
161 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
162 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
163 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
164 struct sysctl_oid_list *,
165 struct i40e_eth_stats *);
166 static void ixl_update_stats_counters(struct ixl_pf *);
167 static void ixl_update_eth_stats(struct ixl_vsi *);
168 static void ixl_pf_reset_stats(struct ixl_pf *);
169 static void ixl_vsi_reset_stats(struct ixl_vsi *);
170 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
172 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
176 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
177 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
178 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
179 static int ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
180 static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
183 /*********************************************************************
184 * FreeBSD Device Interface Entry Points
185 *********************************************************************/
/*
 * Newbus glue: map the generic device interface entry points onto the
 * ixl_* implementations, then register the driver on the pci bus.
 * The softc allocated per device is a struct ixl_pf.
 */
187 static device_method_t ixl_methods[] = {
188 /* Device interface */
189 DEVMETHOD(device_probe, ixl_probe),
190 DEVMETHOD(device_attach, ixl_attach),
191 DEVMETHOD(device_detach, ixl_detach),
192 DEVMETHOD(device_shutdown, ixl_shutdown),
196 static driver_t ixl_driver = {
197 "ixl", ixl_methods, sizeof(struct ixl_pf),
200 devclass_t ixl_devclass;
201 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
/* Module load-order dependencies: require pci and ether. */
203 MODULE_DEPEND(ixl, pci, 1, 1, 1);
204 MODULE_DEPEND(ixl, ether, 1, 1, 1);
207 ** Global reset mutex
/* Serializes the one-time CORE reset tracking done in ixl_attach();
 * initialized lazily in ixl_probe(). */
209 static struct mtx ixl_reset_mtx;
212 ** TUNEABLE PARAMETERS:
/* Parent sysctl node: hw.ixl.* */
215 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
216 "IXL driver parameters");
219 * MSIX should be the default for best performance,
220 * but this allows it to be forced off for testing.
222 static int ixl_enable_msix = 1;
223 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
224 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
225 "Enable MSI-X interrupts");
228 ** Number of descriptors per ring:
229 ** - TX and RX are the same size
231 static int ixl_ringsz = DEFAULT_RING;
232 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
233 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
234 &ixl_ringsz, 0, "Descriptor Ring Size");
237 ** This can be set manually, if left as 0 the
238 ** number of queues will be calculated based
239 ** on cpus and msix vectors available.
241 int ixl_max_queues = 0;
242 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
243 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
244 &ixl_max_queues, 0, "Number of Queues");
247 ** Controls for Interrupt Throttling
248 ** - true/false for dynamic adjustment
249 ** - default values for static ITR
251 int ixl_dynamic_rx_itr = 0;
252 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
253 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
254 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
256 int ixl_dynamic_tx_itr = 0;
257 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
258 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
259 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults; also exposed read-write in ixl_attach()'s
 * per-device sysctl tree. */
261 int ixl_rx_itr = IXL_ITR_8K;
262 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
263 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
264 &ixl_rx_itr, 0, "RX Interrupt Rate");
266 int ixl_tx_itr = IXL_ITR_4K;
267 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
268 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
269 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* Flow-director enable; read in ixl_init_locked() filter setup. */
272 static int ixl_enable_fdir = 1;
273 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
274 /* Rate at which we sample */
275 int ixl_atr_rate = 20;
276 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
/* Textual names for the flow-control modes reported via the "fc" sysctl. */
279 static char *ixl_fc_string[6] = {
289 /*********************************************************************
290 * Device identification routine
292 * ixl_probe determines if the driver should be loaded on
293 * the hardware based on PCI vendor/device id of the device.
295 * return BUS_PROBE_DEFAULT on success, positive on failure
296 *********************************************************************/
/*
 * ixl_probe - decide whether this driver services the given PCI device.
 *
 * Walks ixl_vendor_info_array comparing vendor/device and (wildcardable)
 * subvendor/subdevice IDs; on a match it sets the device description from
 * ixl_strings[] and returns BUS_PROBE_DEFAULT.  Also performs the one-shot
 * initialization of the global ixl_reset_mtx on the first successful probe.
 * NOTE(review): some intermediate lines (early-return paths, loop
 * increment, closing braces) are not visible in this excerpt.
 */
299 ixl_probe(device_t dev)
301 ixl_vendor_info_t *ent;
303 u16 pci_vendor_id, pci_device_id;
304 u16 pci_subvendor_id, pci_subdevice_id;
305 char device_name[256];
/* lock_init is static: the mutex must be initialized exactly once
 * across all probed ixl devices. */
306 static bool lock_init = FALSE;
308 INIT_DEBUGOUT("ixl_probe: begin");
310 pci_vendor_id = pci_get_vendor(dev);
/* Fast reject: only Intel devices can match the table. */
311 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
314 pci_device_id = pci_get_device(dev);
315 pci_subvendor_id = pci_get_subvendor(dev);
316 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the match table; the all-zero sentinel terminates the loop. */
318 ent = ixl_vendor_info_array;
319 while (ent->vendor_id != 0) {
320 if ((pci_vendor_id == ent->vendor_id) &&
321 (pci_device_id == ent->device_id) &&
/* subvendor/subdevice of 0 in the table is a wildcard */
323 ((pci_subvendor_id == ent->subvendor_id) ||
324 (ent->subvendor_id == 0)) &&
326 ((pci_subdevice_id == ent->subdevice_id) ||
327 (ent->subdevice_id == 0))) {
/* sprintf is bounded in practice: both inputs are short static
 * strings from this file, well under the 256-byte buffer. */
328 sprintf(device_name, "%s, Version - %s",
329 ixl_strings[ent->index],
331 device_set_desc_copy(dev, device_name);
332 /* One shot mutex init */
333 if (lock_init == FALSE) {
335 mtx_init(&ixl_reset_mtx,
337 "IXL RESET Lock", MTX_DEF);
339 return (BUS_PROBE_DEFAULT);
346 /*********************************************************************
347 * Device initialization routine
349 * The attach entry point is called when the driver is being loaded.
350 * This routine identifies the type of hardware, allocates all resources
351 * and initializes the hardware.
353 * return 0 on success, positive on failure
354 *********************************************************************/
/*
 * ixl_attach - bring the device up at driver load.
 *
 * Sequence: allocate/initialize the softc and PF lock, create sysctls,
 * save PCI identity, map PCI resources, PF reset (plus a one-time CORE
 * reset per physical device), init the shared code and admin queue,
 * sanity-check NVM/firmware API versions, discover capabilities, set up
 * the LAN HMC, fetch/validate the MAC address, set up stations and
 * interrupt routing (MSI-X or legacy), determine link state, create the
 * OS network interface, and register VLAN event handlers.
 * Returns 0 on success, positive errno on failure.
 * NOTE(review): several lines — declarations, goto labels for the error
 * unwind at the tail, and some closing braces — are not visible in this
 * excerpt; the unwind targets below (shutdown HMC/adminq, free PCI
 * resources, destroy lock) are the visible remains of that error path.
 */
357 ixl_attach(device_t dev)
365 INIT_DEBUGOUT("ixl_attach: begin");
367 /* Allocate, clear, and link in our primary soft structure */
368 pf = device_get_softc(dev);
369 pf->dev = pf->osdep.dev = dev;
373 ** Note this assumes we have a single embedded VSI,
374 ** this could be enhanced later to allocate multiple
380 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
382 /* Set up the timer callout */
/* Timer callout is bound to the PF mutex so the callback runs locked. */
383 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Per-device sysctl tree: control knobs first... */
386 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
387 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
388 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
389 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
391 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
392 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
393 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
394 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
396 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
397 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
399 pf, 0, ixl_current_speed, "A", "Current Port Speed");
/* ITR knobs point at the file-scope globals, so they affect all units. */
401 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
402 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
403 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
404 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
406 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
407 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
408 OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
409 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
411 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
412 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
413 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
414 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
416 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
417 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
418 OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
419 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
/* ...then read-only diagnostic views. */
422 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
423 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
424 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
425 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
427 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
428 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
429 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
430 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
432 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
433 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
434 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
435 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
437 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
438 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
439 OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
440 pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");
442 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
443 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
444 OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
445 pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
448 /* Save off the information about this board */
449 hw->vendor_id = pci_get_vendor(dev);
450 hw->device_id = pci_get_device(dev);
451 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
452 hw->subsystem_vendor_id =
453 pci_read_config(dev, PCIR_SUBVEND_0, 2);
454 hw->subsystem_device_id =
455 pci_read_config(dev, PCIR_SUBDEV_0, 2);
457 hw->bus.device = pci_get_slot(dev);
458 hw->bus.func = pci_get_function(dev);
460 /* Do PCI setup - map BAR0, etc */
461 if (ixl_allocate_pci_resources(pf)) {
462 device_printf(dev, "Allocation of PCI resources failed\n");
467 /* Create for initial debugging use */
468 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
469 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
470 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
471 ixl_debug_info, "I", "Debug Information");
474 /* Establish a clean starting point */
476 error = i40e_pf_reset(hw);
478 device_printf(dev,"PF reset failure %x\n", error);
483 /* For now always do an initial CORE reset on first device */
/* Statics track which (bus,slot) devices already did the CORE reset,
 * so multi-function parts reset only once; serialized by ixl_reset_mtx. */
485 static int ixl_dev_count;
486 static int ixl_dev_track[32];
488 int i, found = FALSE;
489 u16 bus = pci_get_bus(dev);
491 mtx_lock(&ixl_reset_mtx);
/* Device key: PCI bus in the high byte, slot in the low byte. */
492 my_dev = (bus << 8) | hw->bus.device;
494 for (i = 0; i < ixl_dev_count; i++) {
495 if (ixl_dev_track[i] == my_dev)
502 ixl_dev_track[ixl_dev_count] = my_dev;
505 INIT_DEBUGOUT("Initial CORE RESET\n");
506 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/* Poll RSTAT until the device leaves the reset state, then scrub the
 * admin transmit queue registers before re-init. */
511 reg = rd32(hw, I40E_GLGEN_RSTAT);
512 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
517 wr32(hw, I40E_PF_ATQLEN, 0);
518 wr32(hw, I40E_PF_ATQBAL, 0);
519 wr32(hw, I40E_PF_ATQBAH, 0);
520 i40e_clear_pxe_mode(hw);
522 mtx_unlock(&ixl_reset_mtx);
525 /* Set admin queue parameters */
526 hw->aq.num_arq_entries = IXL_AQ_LEN;
527 hw->aq.num_asq_entries = IXL_AQ_LEN;
528 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
529 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
531 /* Initialize the shared code */
532 error = i40e_init_shared_code(hw);
534 device_printf(dev,"Unable to initialize the shared code\n");
539 /* Set up the admin queue */
540 error = i40e_init_adminq(hw);
541 device_printf(dev, "The driver for the device stopped "
543 "because the NVM image is newer than expected.\n"
544 "You must install the most recent version of "
545 " the network driver.\n");
548 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
/* Warn (but continue) on firmware API version skew in either direction. */
550 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
551 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
552 device_printf(dev, "The driver for the device detected "
553 "a newer version of the NVM image than expected.\n"
554 "Please install the most recent version of the network driver.\n");
555 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
556 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
557 device_printf(dev, "The driver for the device detected "
558 "an older version of the NVM image than expected.\n"
559 "Please update the NVM image.\n");
562 i40e_clear_pxe_mode(hw);
564 /* Get capabilities from the device */
565 error = ixl_get_hw_capabilities(pf);
567 device_printf(dev, "HW capabilities failure!\n");
571 /* Set up host memory cache */
572 error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
574 device_printf(dev, "init_lan_hmc failed: %d\n", error);
578 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
580 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
584 /* Disable LLDP from the firmware */
585 i40e_aq_stop_lldp(hw, TRUE, NULL);
587 i40e_get_mac_addr(hw, hw->mac.addr);
588 error = i40e_validate_mac_addr(hw->mac.addr);
590 device_printf(dev, "validate_mac_addr failed: %d\n", error);
593 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
594 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
596 if (ixl_setup_stations(pf) != 0) {
597 device_printf(dev, "setup stations failed!\n");
602 /* Initialize mac filter list for VSI */
603 SLIST_INIT(&vsi->ftl);
605 /* Set up interrupt routing here */
/* MSI-X is attempted first; legacy is the fallback path. */
607 error = ixl_assign_vsi_msix(pf);
609 error = ixl_assign_vsi_legacy(pf);
614 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
616 device_printf(dev, "link restart failed, aq_err=%d\n",
617 pf->hw.aq.asq_last_status);
620 /* Determine link state */
621 vsi->link_up = ixl_config_link(hw);
623 /* Report if Unqualified modules are found */
624 if ((vsi->link_up == FALSE) &&
625 (pf->hw.phy.link_info.link_info &
626 I40E_AQ_MEDIA_AVAILABLE) &&
627 (!(pf->hw.phy.link_info.an_info &
628 I40E_AQ_QUALIFIED_MODULE)))
629 device_printf(dev, "Link failed because "
630 "an unqualified module was detected\n");
632 /* Setup OS specific network interface */
633 if (ixl_setup_interface(dev, vsi) != 0)
636 /* Get the bus configuration and set the shared code */
637 bus = ixl_get_bus_info(hw, dev);
638 i40e_set_pci_config_data(hw, bus);
640 /* Initialize statistics */
641 ixl_pf_reset_stats(pf);
642 ixl_update_stats_counters(pf);
643 ixl_add_hw_stats(pf);
645 /* Register for VLAN events */
646 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
647 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
648 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
649 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
651 INIT_DEBUGOUT("ixl_attach: end");
/* Error unwind (labels not visible here): tear down in reverse order. */
657 i40e_shutdown_lan_hmc(hw);
659 i40e_shutdown_adminq(hw);
661 if (vsi->ifp != NULL)
663 ixl_free_pci_resources(pf);
664 IXL_PF_LOCK_DESTROY(pf);
668 /*********************************************************************
669 * Device removal routine
671 * The detach entry point is called when the driver is being removed.
672 * This routine stops the adapter and deallocates all the resources
673 * that were allocated for driver operation.
675 * return 0 on success, positive on failure
676 *********************************************************************/
/*
 * ixl_detach - unload-time teardown, mirroring ixl_attach in reverse.
 *
 * Refuses to detach while a VLAN trunk references the interface, drains
 * and frees the per-queue taskqueues, shuts down the LAN HMC and admin
 * queue, deregisters VLAN event handlers, detaches the ifnet, stops the
 * timer, releases PCI resources, and destroys the PF lock.
 * Returns 0 on success, positive on failure.
 * NOTE(review): some intermediate lines (stop of the interface, printf
 * preambles for the status checks, returns) are not visible in this
 * excerpt.
 */
679 ixl_detach(device_t dev)
681 struct ixl_pf *pf = device_get_softc(dev);
682 struct i40e_hw *hw = &pf->hw;
683 struct ixl_vsi *vsi = &pf->vsi;
684 struct ixl_queue *que = vsi->queues;
687 INIT_DEBUGOUT("ixl_detach: begin");
689 /* Make sure VLANS are not using driver */
690 if (vsi->ifp->if_vlantrunk != NULL) {
691 device_printf(dev,"Vlan in use, detach first\n");
/* Drain both deferred tasks before freeing each queue's taskqueue. */
699 for (int i = 0; i < vsi->num_queues; i++, que++) {
701 taskqueue_drain(que->tq, &que->task);
702 taskqueue_drain(que->tq, &que->tx_task);
703 taskqueue_free(que->tq);
707 /* Shutdown LAN HMC */
708 status = i40e_shutdown_lan_hmc(hw);
711 "Shutdown LAN HMC failed with code %d\n", status);
713 /* Shutdown admin queue */
714 status = i40e_shutdown_adminq(hw);
717 "Shutdown Admin queue failed with code %d\n", status);
719 /* Unregister VLAN events */
720 if (vsi->vlan_attach != NULL)
721 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
722 if (vsi->vlan_detach != NULL)
723 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
725 ether_ifdetach(vsi->ifp);
/* callout_drain waits for any in-flight ixl_local_timer to finish. */
726 callout_drain(&pf->timer);
728 ixl_free_pci_resources(pf);
729 bus_generic_detach(dev);
732 IXL_PF_LOCK_DESTROY(pf);
736 /*********************************************************************
738 * Shutdown entry point
740 **********************************************************************/
/*
 * ixl_shutdown - system-shutdown entry point; quiesces the device.
 * NOTE(review): the body beyond the softc lookup is not visible in this
 * excerpt (it presumably stops the interface under the PF lock — confirm
 * against the full source).
 */
743 ixl_shutdown(device_t dev)
745 struct ixl_pf *pf = device_get_softc(dev);
753 /*********************************************************************
755 * Get the hardware capabilities
757 **********************************************************************/
/*
 * ixl_get_hw_capabilities - query function capabilities via the admin
 * queue and cache what the driver needs (notably the PF's base queue).
 *
 * Allocates a 40-element response buffer, issues
 * i40e_aq_discover_capabilities() (which fills hw->func_caps), retries
 * once with a larger buffer on ENOMEM, and logs a summary line.
 * NOTE(review): error returns, the free of `buf`, and the retry
 * plumbing are not visible in this excerpt.
 */
760 ixl_get_hw_capabilities(struct ixl_pf *pf)
762 struct i40e_aqc_list_capabilities_element_resp *buf;
763 struct i40e_hw *hw = &pf->hw;
764 device_t dev = pf->dev;
/* Initial guess: room for 40 capability elements. */
769 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
771 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
772 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
773 device_printf(dev, "Unable to allocate cap memory\n");
777 /* This populates the hw struct */
778 error = i40e_aq_discover_capabilities(hw, buf, len,
779 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* Firmware says the buffer was too small: grow once and retry. */
781 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
783 /* retry once with a larger buffer */
787 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
788 device_printf(dev, "capability discovery failed: %d\n",
789 pf->hw.aq.asq_last_status);
793 /* Capture this PF's starting queue pair */
794 pf->qbase = hw->func_caps.base_queue;
/* One-line capability summary for diagnostics. */
797 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
798 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
799 hw->pf_id, hw->func_caps.num_vfs,
800 hw->func_caps.num_msix_vectors,
801 hw->func_caps.num_msix_vectors_vf,
802 hw->func_caps.fd_filters_guaranteed,
803 hw->func_caps.fd_filters_best_effort,
804 hw->func_caps.num_tx_qp,
805 hw->func_caps.num_rx_qp,
806 hw->func_caps.base_queue);
/*
 * ixl_cap_txcsum_tso - enforce the TXCSUM<=>TSO dependency when the user
 * toggles capabilities (called from the SIOCSIFCAP path in ixl_ioctl).
 *
 * TSO requires TX checksum offload, so for each family (v4 then v6) the
 * four current-state combinations of (TXCSUM, TSO) are handled:
 * enabling TSO forces TXCSUM on, disabling TXCSUM forces TSO off, and
 * the IXL_FLAGS_KEEP_TSO[46] flags remember a force-disabled TSO so it
 * can be restored when TXCSUM is re-enabled.
 * `mask` holds the capability bits the user asked to flip.
 */
812 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
814 device_t dev = vsi->dev;
816 /* Enable/disable TXCSUM/TSO4 */
/* Case: both currently off. */
817 if (!(ifp->if_capenable & IFCAP_TXCSUM)
818 && !(ifp->if_capenable & IFCAP_TSO4)) {
819 if (mask & IFCAP_TXCSUM) {
820 ifp->if_capenable |= IFCAP_TXCSUM;
821 /* enable TXCSUM, restore TSO if previously enabled */
822 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
823 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
824 ifp->if_capenable |= IFCAP_TSO4;
827 else if (mask & IFCAP_TSO4) {
/* TSO4 requested alone: drag TXCSUM along with it. */
828 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
829 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
831 "TSO4 requires txcsum, enabling both...\n");
/* Case: TXCSUM on, TSO4 off. */
833 } else if((ifp->if_capenable & IFCAP_TXCSUM)
834 && !(ifp->if_capenable & IFCAP_TSO4)) {
835 if (mask & IFCAP_TXCSUM)
836 ifp->if_capenable &= ~IFCAP_TXCSUM;
837 else if (mask & IFCAP_TSO4)
838 ifp->if_capenable |= IFCAP_TSO4;
/* Case: both currently on. */
839 } else if((ifp->if_capenable & IFCAP_TXCSUM)
840 && (ifp->if_capenable & IFCAP_TSO4)) {
841 if (mask & IFCAP_TXCSUM) {
/* Remember TSO4 was on so it can come back with TXCSUM. */
842 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
843 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
845 "TSO4 requires txcsum, disabling both...\n");
846 } else if (mask & IFCAP_TSO4)
847 ifp->if_capenable &= ~IFCAP_TSO4;
850 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* Same four-state logic, repeated for the IPv6 capabilities. */
851 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
852 && !(ifp->if_capenable & IFCAP_TSO6)) {
853 if (mask & IFCAP_TXCSUM_IPV6) {
854 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
855 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
856 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
857 ifp->if_capenable |= IFCAP_TSO6;
859 } else if (mask & IFCAP_TSO6) {
860 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
861 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
863 "TSO6 requires txcsum6, enabling both...\n");
865 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
866 && !(ifp->if_capenable & IFCAP_TSO6)) {
867 if (mask & IFCAP_TXCSUM_IPV6)
868 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
869 else if (mask & IFCAP_TSO6)
870 ifp->if_capenable |= IFCAP_TSO6;
871 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
872 && (ifp->if_capenable & IFCAP_TSO6)) {
873 if (mask & IFCAP_TXCSUM_IPV6) {
874 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
875 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
877 "TSO6 requires txcsum6, disabling both...\n");
878 } else if (mask & IFCAP_TSO6)
879 ifp->if_capenable &= ~IFCAP_TSO6;
883 /*********************************************************************
886 * ixl_ioctl is called when the user wants to configure the
889 * return 0 on success, positive on failure
890 **********************************************************************/
/*
 * ixl_ioctl - interface ioctl handler.
 *
 * Handles address assignment (avoiding a full re-init/link renegotiation
 * where possible), MTU changes, interface flag changes (promisc/allmulti
 * via ixl_set_promisc), multicast list updates, media get/set, and
 * capability toggles (delegating the TXCSUM/TSO coupling to
 * ixl_cap_txcsum_tso).  Unknown commands fall through to ether_ioctl.
 * Returns 0 on success, positive errno on failure.
 * NOTE(review): the switch statement, its case labels, lock/unlock
 * calls, and several returns are not visible in this excerpt — only the
 * case bodies remain.
 */
893 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
895 struct ixl_vsi *vsi = ifp->if_softc;
896 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
897 struct ifreq *ifr = (struct ifreq *) data;
898 #if defined(INET) || defined(INET6)
899 struct ifaddr *ifa = (struct ifaddr *)data;
900 bool avoid_reset = FALSE;
/* SIOCSIFADDR-style handling: only INET/INET6 can skip the reset. */
908 if (ifa->ifa_addr->sa_family == AF_INET)
912 if (ifa->ifa_addr->sa_family == AF_INET6)
915 #if defined(INET) || defined(INET6)
917 ** Calling init results in link renegotiation,
918 ** so we avoid doing it when possible.
921 ifp->if_flags |= IFF_UP;
922 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
925 if (!(ifp->if_flags & IFF_NOARP))
926 arp_ifinit(ifp, ifa);
929 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: bound the MTU by the hardware max frame size. */
933 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
934 if (ifr->ifr_mtu > IXL_MAX_FRAME -
935 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
939 ifp->if_mtu = ifr->ifr_mtu;
940 vsi->max_frame_size =
941 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
942 + ETHER_VLAN_ENCAP_LEN;
/* SIOCSIFFLAGS: re-program promisc/allmulti only when those bits
 * actually changed relative to the cached pf->if_flags. */
948 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
950 if (ifp->if_flags & IFF_UP) {
951 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
952 if ((ifp->if_flags ^ pf->if_flags) &
953 (IFF_PROMISC | IFF_ALLMULTI)) {
954 ixl_set_promisc(vsi);
959 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
961 pf->if_flags = ifp->if_flags;
/* SIOC{ADD,DEL}MULTI: quiesce interrupts around the filter update. */
965 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
966 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
968 ixl_disable_intr(vsi);
970 ixl_enable_intr(vsi);
975 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
976 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
978 ixl_disable_intr(vsi);
980 ixl_enable_intr(vsi);
986 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
987 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* SIOCSIFCAP: `mask` is the set of capability bits being toggled. */
991 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
992 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
994 ixl_cap_txcsum_tso(vsi, ifp, mask);
996 if (mask & IFCAP_RXCSUM)
997 ifp->if_capenable ^= IFCAP_RXCSUM;
998 if (mask & IFCAP_RXCSUM_IPV6)
999 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1000 if (mask & IFCAP_LRO)
1001 ifp->if_capenable ^= IFCAP_LRO;
1002 if (mask & IFCAP_VLAN_HWTAGGING)
1003 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1004 if (mask & IFCAP_VLAN_HWFILTER)
1005 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1006 if (mask & IFCAP_VLAN_HWTSO)
1007 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Re-init so the hardware picks up the new capability set. */
1008 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1010 ixl_init_locked(pf);
1013 VLAN_CAPABILITIES(ifp);
/* default: pass anything else to the generic ethernet handler. */
1019 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1020 error = ether_ioctl(ifp, command, data);
1028 /*********************************************************************
1031 * This routine is used in two ways. It is used by the stack as
1032 * init entry point in network interface structure. It is also used
1033 * by the driver as a hw/sw initialization routine to get to a
1036 * return 0 on success, positive on failure
1037 **********************************************************************/
/*
 * ixl_init_locked - (re)initialize the interface; PF lock must be held.
 *
 * Refreshes the MAC address (a user-set LAA takes precedence over the
 * burned-in address), programs hardware offload assists from the current
 * capability set, configures device filtering and RSS, initializes the
 * VSI rings/HMC contexts, installs MAC/VLAN filters, starts the local
 * timer, programs interrupt routing and ITR, enables rings and
 * interrupts, sets the hardware MTU, and finally marks the ifnet
 * IFF_DRV_RUNNING.
 * NOTE(review): error-check lines around several calls (e.g. the
 * aq_error test, the ixl_stop on VSI init failure) are not visible in
 * this excerpt.
 */
1040 ixl_init_locked(struct ixl_pf *pf)
1042 struct i40e_hw *hw = &pf->hw;
1043 struct ixl_vsi *vsi = &pf->vsi;
1044 struct ifnet *ifp = vsi->ifp;
1045 device_t dev = pf->dev;
1046 struct i40e_filter_control_settings filter;
1047 u8 tmpaddr[ETHER_ADDR_LEN];
1050 mtx_assert(&pf->pf_mtx, MA_OWNED);
1051 INIT_DEBUGOUT("ixl_init: begin");
1054 /* Get the latest mac address... User might use a LAA */
1055 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1056 I40E_ETH_LENGTH_OF_ADDRESS);
/* Only push a new address when it differs and validates. */
1057 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1058 i40e_validate_mac_addr(tmpaddr)) {
1059 bcopy(tmpaddr, hw->mac.addr,
1060 I40E_ETH_LENGTH_OF_ADDRESS);
1061 ret = i40e_aq_mac_address_write(hw,
1062 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1063 hw->mac.addr, NULL);
1065 device_printf(dev, "LLA address"
1066 "change failed!!\n");
1071 /* Set the various hardware offload abilities */
/* Rebuild if_hwassist from scratch to mirror if_capenable. */
1072 ifp->if_hwassist = 0;
1073 if (ifp->if_capenable & IFCAP_TSO)
1074 ifp->if_hwassist |= CSUM_TSO;
1075 if (ifp->if_capenable & IFCAP_TXCSUM)
1076 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1077 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1078 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1080 /* Set up the device filtering */
1081 bzero(&filter, sizeof(filter));
1082 filter.enable_ethtype = TRUE;
1083 filter.enable_macvlan = TRUE;
1085 filter.enable_fdir = TRUE;
1087 if (i40e_set_filter_control(hw, &filter))
1088 device_printf(dev, "set_filter_control() failed\n");
1091 ixl_config_rss(vsi);
1097 ** Prepare the rings, hmc contexts, etc...
1099 if (ixl_initialize_vsi(vsi)) {
1100 device_printf(dev, "initialize vsi failed!!\n");
1104 /* Add protocol filters to list */
1105 ixl_init_filters(vsi);
1107 /* Setup vlan's if needed */
1108 ixl_setup_vlan_filters(vsi);
1110 /* Start the local timer */
1111 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1113 /* Set up MSI/X routing and the ITR settings */
1114 if (ixl_enable_msix) {
1115 ixl_configure_msix(pf);
1116 ixl_configure_itr(pf);
1118 ixl_configure_legacy(pf);
1120 ixl_enable_rings(vsi);
1122 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1124 /* Set MTU in hardware*/
1125 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1128 device_printf(vsi->dev,
1129 "aq_set_mac_config in init error, code %d\n",
1132 /* And now turn on interrupts */
1133 ixl_enable_intr(vsi);
1135 /* Now inform the stack we're ready */
1136 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1137 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ifnet if_init entry point (presumably ixl_init — the signature line is
 * not visible in this listing; it is installed at ifp->if_init below).
 * Takes the softc and defers to the locked init path.
 * NOTE(review): surrounding lines (locking calls, braces) appear elided;
 * verify against upstream if_ixl.c.
 */
1145 struct ixl_pf *pf = arg;
1148 ixl_init_locked(pf);
1155 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixl_handle_que - deferred (taskqueue) service routine for one RX/TX
 * queue pair: cleans the RX ring, restarts TX if the buf_ring has work,
 * reschedules itself when more work remains, and re-enables the queue
 * interrupt.
 * NOTE(review): lines are elided in this listing (gaps in embedded
 * numbering) — braces, locking and the 'more' declaration are missing.
 */
1159 ixl_handle_que(void *context, int pending)
1161 struct ixl_queue *que = context;
1162 struct ixl_vsi *vsi = que->vsi;
1163 struct i40e_hw *hw = vsi->hw;
1164 struct tx_ring *txr = &que->txr;
1165 struct ifnet *ifp = vsi->ifp;
/* Only service the queue while the interface is running */
1168 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Clean received packets, bounded by IXL_RX_LIMIT per pass */
1169 more = ixl_rxeof(que, IXL_RX_LIMIT);
/* Kick the transmit path if the stack has queued packets */
1172 if (!drbr_empty(ifp, txr->br))
1173 ixl_mq_start_locked(ifp, txr);
/* Presumably rescheduled only when rxeof reported more work — elided
 * conditional; TODO confirm against upstream */
1176 taskqueue_enqueue(que->tq, &que->task);
1181 /* Reenable this interrupt - hmmm */
1182 ixl_enable_queue(hw, que->me);
1187 /*********************************************************************
1189 * Legacy Interrupt Service routine
1191 **********************************************************************/
/*
 * Legacy (INTx/MSI, single-vector) interrupt service routine: reads and
 * acknowledges ICR0, dispatches the admin queue task if flagged, services
 * the single RX/TX queue pair (queue 0), then re-enables all causes.
 * NOTE(review): the signature and several lines (locks, returns, braces)
 * are elided in this listing.
 */
1195 struct ixl_pf *pf = arg;
1196 struct i40e_hw *hw = &pf->hw;
1197 struct ixl_vsi *vsi = &pf->vsi;
1198 struct ixl_queue *que = vsi->queues;
1199 struct ifnet *ifp = vsi->ifp;
1200 struct tx_ring *txr = &que->txr;
1201 u32 reg, icr0, mask;
1202 bool more_tx, more_rx;
1206 /* Protect against spurious interrupts */
1207 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Reading ICR0 latches the pending interrupt causes */
1210 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* Ack: set CLEARPBA in DYN_CTL0 to clear the pending-bit array */
1212 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1213 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1214 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1216 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* Admin queue events are handled from the taskqueue, not here */
1218 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1219 taskqueue_enqueue(pf->tq, &pf->adminq);
1223 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1226 more_tx = ixl_txeof(que);
1227 if (!drbr_empty(vsi->ifp, txr->br))
1231 /* re-enable other interrupt causes */
1232 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1234 /* And now the queues */
1235 reg = rd32(hw, I40E_QINT_RQCTL(0));
1236 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1237 wr32(hw, I40E_QINT_RQCTL(0), reg);
1239 reg = rd32(hw, I40E_QINT_TQCTL(0));
1240 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1241 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1242 wr32(hw, I40E_QINT_TQCTL(0), reg);
1244 ixl_enable_legacy(hw);
1250 /*********************************************************************
1252 * MSIX VSI Interrupt Service routine
1254 **********************************************************************/
/*
 * ixl_msix_que - MSI-X interrupt handler for one VSI queue pair.
 * Cleans RX and TX, updates the dynamic ITR values for the queue, defers
 * remaining work to the queue taskqueue, and re-enables the vector.
 * NOTE(review): lines (locks, braces, return) are elided in this listing.
 */
1256 ixl_msix_que(void *arg)
1258 struct ixl_queue *que = arg;
1259 struct ixl_vsi *vsi = que->vsi;
1260 struct i40e_hw *hw = vsi->hw;
1261 struct tx_ring *txr = &que->txr;
1262 bool more_tx, more_rx;
1264 /* Protect against spurious interrupts */
1265 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1270 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1273 more_tx = ixl_txeof(que);
1275 ** Make certain that if the stack
1276 ** has anything queued the task gets
1277 ** scheduled to handle it.
1279 if (!drbr_empty(vsi->ifp, txr->br))
/* Recompute per-queue interrupt throttling from recent traffic */
1283 ixl_set_queue_rx_itr(que);
1284 ixl_set_queue_tx_itr(que);
/* Defer leftover work rather than looping in interrupt context */
1286 if (more_tx || more_rx)
1287 taskqueue_enqueue(que->tq, &que->task);
1289 ixl_enable_queue(hw, que->me);
1295 /*********************************************************************
1297 * MSIX Admin Queue Interrupt Service routine
1299 **********************************************************************/
/*
 * ixl_msix_adminq - MSI-X vector 0 handler ("other" causes): admin queue
 * events, malicious-driver detection (MDD), and VF level resets (VFLR).
 * Each serviced cause is masked out of the ENA set; the real work is
 * done later in the adminq taskqueue.
 * NOTE(review): declarations of reg/mask and closing lines are elided
 * in this listing.
 */
1301 ixl_msix_adminq(void *arg)
1303 struct ixl_pf *pf = arg;
1304 struct i40e_hw *hw = &pf->hw;
1309 reg = rd32(hw, I40E_PFINT_ICR0);
1310 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1312 /* Check on the cause */
1313 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1314 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* MDD is handled synchronously; keep it masked afterwards */
1316 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1317 ixl_handle_mdd_event(pf);
1318 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1321 if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1322 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
/* Ack the vector by setting CLEARPBA */
1324 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1325 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1326 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1328 taskqueue_enqueue(pf->tq, &pf->adminq);
1332 /*********************************************************************
1334 * Media Ioctl callback
1336 * This routine is called whenever the user queries the status of
1337 * the interface using ifconfig.
1339 **********************************************************************/
/*
 * ixl_media_status - ifmedia status callback (ifconfig queries).
 * Refreshes link state, then translates the PHY type reported by the
 * firmware into the corresponding IFM_* active-media flags, plus
 * flow-control pause flags.
 * NOTE(review): the switch cases below appear to be missing their
 * break; lines (elided by this listing) — in upstream each case breaks.
 */
1341 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1343 struct ixl_vsi *vsi = ifp->if_softc;
1344 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1345 struct i40e_hw *hw = &pf->hw;
1347 INIT_DEBUGOUT("ixl_media_status: begin");
1350 ixl_update_link_status(pf);
1352 ifmr->ifm_status = IFM_AVALID;
1353 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid but inactive" (early return elided) */
1355 if (!vsi->link_up) {
1360 ifmr->ifm_status |= IFM_ACTIVE;
1361 /* Hardware is always full-duplex */
1362 ifmr->ifm_active |= IFM_FDX;
/* Map firmware PHY type to an ifmedia subtype */
1364 switch (hw->phy.link_info.phy_type) {
1366 case I40E_PHY_TYPE_100BASE_TX:
1367 ifmr->ifm_active |= IFM_100_TX;
1370 case I40E_PHY_TYPE_1000BASE_T:
1371 ifmr->ifm_active |= IFM_1000_T;
1373 case I40E_PHY_TYPE_1000BASE_SX:
1374 ifmr->ifm_active |= IFM_1000_SX;
1376 case I40E_PHY_TYPE_1000BASE_LX:
1377 ifmr->ifm_active |= IFM_1000_LX;
1380 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1381 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1382 ifmr->ifm_active |= IFM_10G_TWINAX;
1384 case I40E_PHY_TYPE_10GBASE_SR:
1385 ifmr->ifm_active |= IFM_10G_SR;
1387 case I40E_PHY_TYPE_10GBASE_LR:
1388 ifmr->ifm_active |= IFM_10G_LR;
1390 case I40E_PHY_TYPE_10GBASE_T:
1391 ifmr->ifm_active |= IFM_10G_T;
1394 case I40E_PHY_TYPE_40GBASE_CR4:
1395 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1396 ifmr->ifm_active |= IFM_40G_CR4;
1398 case I40E_PHY_TYPE_40GBASE_SR4:
1399 ifmr->ifm_active |= IFM_40G_SR4;
1401 case I40E_PHY_TYPE_40GBASE_LR4:
1402 ifmr->ifm_active |= IFM_40G_LR4;
/* default case (label elided): unrecognized PHY type */
1405 ifmr->ifm_active |= IFM_UNKNOWN;
1408 /* Report flow control status as well */
1409 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1410 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1411 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1412 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1419 /*********************************************************************
1421 * Media Ioctl callback
1423 * This routine is called when the user changes speed/duplex using
1424 * media/mediaopt option with ifconfig.
1426 **********************************************************************/
/*
 * ixl_media_change - ifmedia change callback (ifconfig media/mediaopt).
 * Validates the request is for Ethernet media, then reports that manual
 * media selection is not supported by this driver.
 * NOTE(review): the return statements are elided in this listing.
 */
1428 ixl_media_change(struct ifnet * ifp)
1430 struct ixl_vsi *vsi = ifp->if_softc;
1431 struct ifmedia *ifm = &vsi->media;
1433 INIT_DEBUGOUT("ixl_media_change: begin");
1435 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1438 if_printf(ifp, "Media change is currently not supported.\n");
1446 ** ATR: Application Targeted Receive - creates a filter
1447 ** based on TX flow info that will keep the receive
1448 ** portion of the flow on the same queue. Based on the
1449 ** implementation this is only available for TCP connections
/*
 * ixl_atr - Application Targeted Receive: builds a flow-director filter
 * programming descriptor on the TX ring from TCP flow info, so the RX
 * side of the flow lands on the same queue.  Filters are added/updated
 * on sampled packets and removed when a FIN is seen.
 * NOTE(review): declarations of idx/ptype/dtype and some braces are
 * elided in this listing.
 */
1452 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1454 struct ixl_vsi *vsi = que->vsi;
1455 struct tx_ring *txr = &que->txr;
1456 struct i40e_filter_program_desc *FDIR;
1460 /* check if ATR is enabled and sample rate */
1461 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1464 ** We sample all TCP SYN/FIN packets,
1465 ** or at the selected sample rate
1468 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1469 (txr->atr_count < txr->atr_rate))
1473 /* Get a descriptor to use */
1474 idx = txr->next_avail;
1475 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
/* Advance with wrap (reset-to-zero branch elided) */
1476 if (++idx == que->num_desc)
1479 txr->next_avail = idx;
/* Destination queue index for the filter */
1481 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1482 I40E_TXD_FLTR_QW0_QINDEX_MASK;
/* Packet classifier type: IPv4-TCP vs IPv6-TCP by ethertype */
1484 ptype |= (etype == ETHERTYPE_IP) ?
1485 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1486 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1487 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1488 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1490 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1492 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1495 ** We use the TCP TH_FIN as a trigger to remove
1496 ** the filter, otherwise its an update.
1498 dtype |= (th->th_flags & TH_FIN) ?
1499 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1500 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1501 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1502 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1504 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1505 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1507 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1508 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
/* Descriptors are little-endian in host memory */
1510 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1511 FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * ixl_set_promisc - program unicast/multicast promiscuous mode on the
 * VSI from the interface flags: IFF_PROMISC enables unicast promisc;
 * IFF_ALLMULTI, or overflowing MAX_MULTICAST_ADDR filters, enables
 * multicast promisc.
 * NOTE(review): mcnt/err declarations and several assignment lines
 * (multi = TRUE, uni = TRUE, mcnt++) are elided in this listing.
 */
1518 ixl_set_promisc(struct ixl_vsi *vsi)
1520 struct ifnet *ifp = vsi->ifp;
1521 struct i40e_hw *hw = vsi->hw;
1523 bool uni = FALSE, multi = FALSE;
1525 if (ifp->if_flags & IFF_ALLMULTI)
1527 else { /* Need to count the multicast addresses */
1528 struct ifmultiaddr *ifma;
1529 if_maddr_rlock(ifp);
1530 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1531 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Stop counting once the hardware filter limit is reached */
1533 if (mcnt == MAX_MULTICAST_ADDR)
1537 if_maddr_runlock(ifp);
1540 if (mcnt >= MAX_MULTICAST_ADDR)
1542 if (ifp->if_flags & IFF_PROMISC)
/* Push both settings via the admin queue */
1545 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1546 vsi->seid, uni, NULL);
1547 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1548 vsi->seid, multi, NULL);
1552 /*********************************************************************
1555 * Routines for multicast and vlan filter management.
1557 *********************************************************************/
/*
 * ixl_add_multi - sync the interface's multicast address list into the
 * VSI's hardware filters.  If the list exceeds MAX_MULTICAST_ADDR the
 * existing MC filters are dropped and multicast promiscuous is enabled
 * instead.
 * NOTE(review): mcnt increments, returns and some braces are elided in
 * this listing.
 */
1559 ixl_add_multi(struct ixl_vsi *vsi)
1561 struct ifmultiaddr *ifma;
1562 struct ifnet *ifp = vsi->ifp;
1563 struct i40e_hw *hw = vsi->hw;
1564 int mcnt = 0, flags;
1566 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1568 if_maddr_rlock(ifp);
1570 ** First just get a count, to decide if
1571 ** we simply use multicast promiscuous.
1573 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1574 if (ifma->ifma_addr->sa_family != AF_LINK)
1578 if_maddr_runlock(ifp);
/* Too many groups for the filter table: fall back to MC promisc */
1580 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1581 /* delete existing MC filters */
1582 ixl_del_hw_filters(vsi, mcnt);
1583 i40e_aq_set_vsi_multicast_promiscuous(hw,
1584 vsi->seid, TRUE, NULL);
/* Second pass: queue each MC address on the software filter list */
1589 if_maddr_rlock(ifp);
1590 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1591 if (ifma->ifma_addr->sa_family != AF_LINK)
1593 ixl_add_mc_filter(vsi,
1594 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1597 if_maddr_runlock(ifp);
/* Push the queued filters to the hardware in one batch */
1599 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1600 ixl_add_hw_filters(vsi, flags, mcnt);
1603 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * ixl_del_multi - remove hardware multicast filters whose addresses no
 * longer appear in the interface's multicast list.  Each stale filter is
 * flagged IXL_FILTER_DEL, then deleted from hardware in one batch.
 * NOTE(review): match/mcnt declarations, the match assignments, and the
 * surrounding if (mcnt > 0) guard are elided in this listing.
 */
1608 ixl_del_multi(struct ixl_vsi *vsi)
1610 struct ifnet *ifp = vsi->ifp;
1611 struct ifmultiaddr *ifma;
1612 struct ixl_mac_filter *f;
1616 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1618 /* Search for removed multicast addresses */
1619 if_maddr_rlock(ifp);
1620 SLIST_FOREACH(f, &vsi->ftl, next) {
/* Only consider active multicast filters */
1621 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1623 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1624 if (ifma->ifma_addr->sa_family != AF_LINK)
1626 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1627 if (cmp_etheraddr(f->macaddr, mc_addr)) {
/* No matching OS-level membership: mark for deletion */
1632 if (match == FALSE) {
1633 f->flags |= IXL_FILTER_DEL;
1638 if_maddr_runlock(ifp);
1641 ixl_del_hw_filters(vsi, mcnt);
1645 /*********************************************************************
1648 * This routine checks for link status, updates statistics,
1649 * and runs the watchdog check.
1651 **********************************************************************/
/*
 * ixl_local_timer - once-per-second callout (runs with pf_mtx held):
 * kicks the adminq task, updates statistics, fires a software interrupt
 * at each queue to force cleanup, tracks per-queue "busy" counts to
 * detect hung TX queues, and reinitializes the interface if every queue
 * is hung.
 * NOTE(review): declarations of hung/mask, several braces and the
 * hung: label/return are elided in this listing.
 */
1654 ixl_local_timer(void *arg)
1656 struct ixl_pf *pf = arg;
1657 struct i40e_hw *hw = &pf->hw;
1658 struct ixl_vsi *vsi = &pf->vsi;
1659 struct ixl_queue *que = vsi->queues;
1660 device_t dev = pf->dev;
1664 mtx_assert(&pf->pf_mtx, MA_OWNED);
1666 /* Fire off the adminq task */
1667 taskqueue_enqueue(pf->tq, &pf->adminq);
1670 ixl_update_stats_counters(pf);
1673 ** Check status of the queues
1675 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1676 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1678 for (int i = 0; i < vsi->num_queues; i++,que++) {
1679 /* Any queues with outstanding work get a sw irq */
1681 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1683 ** Each time txeof runs without cleaning, but there
1684 ** are uncleaned descriptors it increments busy. If
1685 ** we get to 5 we declare it hung.
1687 if (que->busy == IXL_QUEUE_HUNG) {
1689 /* Mark the queue as inactive */
1690 vsi->active_queues &= ~((u64)1 << que->me);
1693 /* Check if we've come back from hung */
1694 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1695 vsi->active_queues |= ((u64)1 << que->me);
1697 if (que->busy >= IXL_MAX_TX_BUSY) {
1698 device_printf(dev,"Warning queue %d "
1699 "appears to be hung!\n", i);
1700 que->busy = IXL_QUEUE_HUNG;
1704 /* Only reinit if all queues show hung */
1705 if (hung == vsi->num_queues)
/* Re-arm the callout for the next second */
1708 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Hang path (label elided): full reinit of the interface */
1712 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1713 ixl_init_locked(pf);
1717 ** Note: this routine updates the OS on the link state
1718 ** the real check of the hardware only happens with
1719 ** a link interrupt.
/*
 * ixl_update_link_status - push the driver's cached link state to the
 * OS (if_link_state_change) and print link up/down transitions.  The
 * actual hardware check happens via the link interrupt; this only acts
 * on vsi->link_up transitions.
 * NOTE(review): the outer if (vsi->link_up) guard and some braces are
 * elided in this listing.
 */
1722 ixl_update_link_status(struct ixl_pf *pf)
1724 struct ixl_vsi *vsi = &pf->vsi;
1725 struct i40e_hw *hw = &pf->hw;
1726 struct ifnet *ifp = vsi->ifp;
1727 device_t dev = pf->dev;
1728 enum i40e_fc_mode fc;
/* Link newly up: refresh firmware link info and announce it */
1732 if (vsi->link_active == FALSE) {
1733 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1735 fc = hw->fc.current_mode;
1736 device_printf(dev,"Link is up %d Gbps %s,"
1737 " Flow Control: %s\n",
1738 ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1739 "Full Duplex", ixl_fc_string[fc]);
1741 vsi->link_active = TRUE;
1742 if_link_state_change(ifp, LINK_STATE_UP);
1744 } else { /* Link down */
1745 if (vsi->link_active == TRUE) {
1747 device_printf(dev,"Link is Down\n");
1748 if_link_state_change(ifp, LINK_STATE_DOWN);
1749 vsi->link_active = FALSE;
1756 /*********************************************************************
1758 * This routine disables all traffic on the adapter by issuing a
1759 * global reset on the MAC and deallocates TX/RX buffers.
1761 **********************************************************************/
/*
 * ixl_stop - quiesce the interface (pf_mtx held): disable interrupts
 * and rings, clear RUNNING/OACTIVE so the stack stops transmitting,
 * and stop the watchdog callout.
 */
1764 ixl_stop(struct ixl_pf *pf)
1766 struct ixl_vsi *vsi = &pf->vsi;
1767 struct ifnet *ifp = vsi->ifp;
1769 mtx_assert(&pf->pf_mtx, MA_OWNED);
1771 INIT_DEBUGOUT("ixl_stop: begin\n");
1772 ixl_disable_intr(vsi);
1773 ixl_disable_rings(vsi);
1775 /* Tell the stack that the interface is no longer active */
1776 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1778 /* Stop the local timer */
1779 callout_stop(&pf->timer);
1785 /*********************************************************************
1787 * Setup MSIX Interrupt resources and handlers for the VSI
1789 **********************************************************************/
/*
 * ixl_assign_vsi_legacy - allocate and wire up a single shared legacy/MSI
 * interrupt: one IRQ resource, one handler (ixl_intr), plus taskqueues
 * for deferred queue work and admin queue processing.
 * NOTE(review): rid/error declarations, returns and some braces are
 * elided in this listing.
 */
1791 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1793 device_t dev = pf->dev;
1794 struct ixl_vsi *vsi = &pf->vsi;
1795 struct ixl_queue *que = vsi->queues;
1800 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1801 &rid, RF_SHAREABLE | RF_ACTIVE);
1802 if (pf->res == NULL) {
1803 device_printf(dev,"Unable to allocate"
1804 " bus resource: vsi legacy/msi interrupt\n");
1808 /* Set the handler function */
1809 error = bus_setup_intr(dev, pf->res,
1810 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1811 ixl_intr, pf, &pf->tag);
1814 device_printf(dev, "Failed to register legacy/msi handler");
1817 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
/* Deferred work for the single queue pair */
1818 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1819 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1820 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1821 taskqueue_thread_enqueue, &que->tq);
1822 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1823 device_get_nameunit(dev));
/* Deferred admin queue processing */
1824 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1825 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1826 taskqueue_thread_enqueue, &pf->tq);
1827 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1828 device_get_nameunit(dev));
1834 /*********************************************************************
1836 * Setup MSIX Interrupt resources and handlers for the VSI
1838 **********************************************************************/
/*
 * ixl_assign_vsi_msix - allocate and wire up MSI-X vectors: vector 0 for
 * the admin queue (ixl_msix_adminq + adminq taskqueue), then one vector
 * per queue pair (ixl_msix_que), each bound to a CPU with its own
 * taskqueue for deferred work.
 * NOTE(review): rid assignments, error returns, que->msix bookkeeping
 * and some braces are elided in this listing.
 */
1840 ixl_assign_vsi_msix(struct ixl_pf *pf)
1842 device_t dev = pf->dev;
1843 struct ixl_vsi *vsi = &pf->vsi;
1844 struct ixl_queue *que = vsi->queues;
1845 struct tx_ring *txr;
1846 int error, rid, vector = 0;
1848 /* Admin Que is vector 0*/
1850 pf->res = bus_alloc_resource_any(dev,
1851 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1853 device_printf(dev,"Unable to allocate"
1854 " bus resource: Adminq interrupt [%d]\n", rid);
1857 /* Set the adminq vector and handler */
1858 error = bus_setup_intr(dev, pf->res,
1859 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1860 ixl_msix_adminq, pf, &pf->tag);
1863 device_printf(dev, "Failed to register Admin que handler");
1866 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1867 pf->admvec = vector;
1868 /* Tasklet for Admin Queue */
1869 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1870 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1871 taskqueue_thread_enqueue, &pf->tq);
1872 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1873 device_get_nameunit(pf->dev));
1876 /* Now set up the stations */
1877 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1880 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1881 RF_SHAREABLE | RF_ACTIVE);
1882 if (que->res == NULL) {
1883 device_printf(dev,"Unable to allocate"
1884 " bus resource: que interrupt [%d]\n", vector);
1887 /* Set the handler function */
1888 error = bus_setup_intr(dev, que->res,
1889 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1890 ixl_msix_que, que, &que->tag);
1893 device_printf(dev, "Failed to register que handler");
1896 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1897 /* Bind the vector to a CPU */
1898 bus_bind_intr(dev, que->res, i);
/* Per-queue taskqueue for deferred RX/TX processing */
1900 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1901 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1902 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1903 taskqueue_thread_enqueue, &que->tq);
1904 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1905 device_get_nameunit(pf->dev));
1913 * Allocate MSI/X vectors
/*
 * ixl_init_msix - decide the interrupt scheme and allocate vectors.
 * Tries MSI-X first (one vector per queue plus one for the admin queue,
 * auto-sized from mp_ncpus and clamped by the ixl_max_queues tunable),
 * then falls back to MSI, then to a legacy interrupt.  Returns the
 * vector count (0 selects the legacy path in the caller).
 * NOTE(review): the virtualization workaround block's guard, the
 * pci_cmd_word/msix_ctrl declarations, returns and want computation
 * are elided in this listing.
 */
1916 ixl_init_msix(struct ixl_pf *pf)
1918 device_t dev = pf->dev;
1919 int rid, want, vectors, queues, available;
1921 /* Override by tuneable */
1922 if (ixl_enable_msix == 0)
1926 ** When used in a virtualized environment
1927 ** PCI BUSMASTER capability may not be set
1928 ** so explicitly set it here and rewrite
1929 ** the ENABLE in the MSIX control register
1930 ** at this point to cause the host to
1931 ** successfully initialize us.
1936 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1937 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1938 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1939 pci_find_cap(dev, PCIY_MSIX, &rid);
1940 rid += PCIR_MSIX_CTRL;
1941 msix_ctrl = pci_read_config(dev, rid, 2);
1942 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1943 pci_write_config(dev, rid, msix_ctrl, 2);
1946 /* First try MSI/X */
1947 rid = PCIR_BAR(IXL_BAR);
1948 pf->msix_mem = bus_alloc_resource_any(dev,
1949 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1950 if (!pf->msix_mem) {
1951 /* May not be enabled */
1952 device_printf(pf->dev,
1953 "Unable to map MSIX table \n");
1957 available = pci_msix_count(dev);
1958 if (available == 0) { /* system has msix disabled */
1959 bus_release_resource(dev, SYS_RES_MEMORY,
1961 pf->msix_mem = NULL;
1965 /* Figure out a reasonable auto config value */
1966 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1968 /* Override with hardcoded value if sane */
1969 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1970 queues = ixl_max_queues;
1973 ** Want one vector (RX/TX pair) per queue
1974 ** plus an additional for the admin queue.
1977 if (want <= available) /* Have enough */
1980 device_printf(pf->dev,
1981 "MSIX Configuration Problem, "
1982 "%d vectors available but %d wanted!\n",
1984 return (0); /* Will go to Legacy setup */
1987 if (pci_alloc_msix(dev, &vectors) == 0) {
1988 device_printf(pf->dev,
1989 "Using MSIX interrupts with %d vectors\n", vectors);
1991 pf->vsi.num_queues = queues;
/* MSI-X failed: fall back to plain MSI with a single queue */
1995 vectors = pci_msi_count(dev);
1996 pf->vsi.num_queues = 1;
1999 ixl_enable_msix = 0;
2000 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2001 device_printf(pf->dev,"Using an MSI interrupt\n");
2004 device_printf(pf->dev,"Using a Legacy interrupt\n");
2011 * Plumb MSI/X vectors
/*
 * ixl_configure_msix - program the hardware interrupt routing for MSI-X:
 * vector 0 gets the "other" causes (adminq, errors, MDD, VFLR), then
 * each queue pair is linked into its own cause list (RX cause chains to
 * TX; the last TX entry terminates with IXL_QUEUE_EOL).
 * NOTE(review): the reg/vector declarations and the vector-1 start value
 * are elided in this listing.
 */
2014 ixl_configure_msix(struct ixl_pf *pf)
2016 struct i40e_hw *hw = &pf->hw;
2017 struct ixl_vsi *vsi = &pf->vsi;
2021 /* First set up the adminq - vector 0 */
2022 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2023 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2025 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2026 I40E_PFINT_ICR0_ENA_GRST_MASK |
2027 I40E_PFINT_ICR0_HMC_ERR_MASK |
2028 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2029 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2030 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2031 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2032 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* 0x7FF terminates vector 0's queue list (no queues on it) */
2034 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2035 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2037 wr32(hw, I40E_PFINT_DYN_CTL0,
2038 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2039 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2041 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2043 /* Next configure the queues */
2044 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2045 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2046 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: route to this vector, chain next to the TX queue */
2048 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2049 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2050 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2051 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2052 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2053 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: chain to the next queue's RX, or EOL on the last one */
2055 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2056 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2057 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2058 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2059 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2060 if (i == (vsi->num_queues - 1))
2061 reg |= (IXL_QUEUE_EOL
2062 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2063 wr32(hw, I40E_QINT_TQCTL(i), reg);
2068 * Configure for MSI single vector operation
/*
 * ixl_configure_legacy - program the hardware for single-vector (MSI or
 * INTx) operation: zero the ITRs, enable all "other" causes on ICR0,
 * associate queue pair 0 with vector 0, and enable the TX/RX queues.
 * NOTE(review): the reg declaration is elided in this listing.
 */
2071 ixl_configure_legacy(struct ixl_pf *pf)
2073 struct i40e_hw *hw = &pf->hw;
2077 wr32(hw, I40E_PFINT_ITR0(0), 0);
2078 wr32(hw, I40E_PFINT_ITR0(1), 0);
2081 /* Setup "other" causes */
2082 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2083 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2084 | I40E_PFINT_ICR0_ENA_GRST_MASK
2085 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2086 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2087 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2088 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2089 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2090 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2091 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2093 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2095 /* SW_ITR_IDX = 0, but don't change INTENA */
2096 wr32(hw, I40E_PFINT_DYN_CTL0,
2097 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2098 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2099 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2100 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2102 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2103 wr32(hw, I40E_PFINT_LNKLST0, 0);
2105 /* Associate the queue pair to the vector and enable the q int */
2106 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2107 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2108 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2109 wr32(hw, I40E_QINT_RQCTL(0), reg);
2111 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2112 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2113 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2114 wr32(hw, I40E_QINT_TQCTL(0), reg);
2116 /* Next enable the queue pair */
2117 reg = rd32(hw, I40E_QTX_ENA(0));
2118 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2119 wr32(hw, I40E_QTX_ENA(0), reg);
2121 reg = rd32(hw, I40E_QRX_ENA(0));
2122 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2123 wr32(hw, I40E_QRX_ENA(0), reg);
2128 * Set the Initial ITR state
/*
 * ixl_configure_itr - seed the initial interrupt-throttle (ITR) values:
 * copies the ixl_rx_itr/ixl_tx_itr tunables into the VSI (tagging them
 * IXL_ITR_DYNAMIC when adaptive moderation is enabled), then programs
 * each queue's RX and TX ITR registers and mirrors the values into the
 * ring state.
 */
2131 ixl_configure_itr(struct ixl_pf *pf)
2133 struct i40e_hw *hw = &pf->hw;
2134 struct ixl_vsi *vsi = &pf->vsi;
2135 struct ixl_queue *que = vsi->queues;
2137 vsi->rx_itr_setting = ixl_rx_itr;
2138 if (ixl_dynamic_rx_itr)
2139 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2140 vsi->tx_itr_setting = ixl_tx_itr;
2141 if (ixl_dynamic_tx_itr)
2142 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2144 for (int i = 0; i < vsi->num_queues; i++, que++) {
2145 struct tx_ring *txr = &que->txr;
2146 struct rx_ring *rxr = &que->rxr;
2148 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2149 vsi->rx_itr_setting);
2150 rxr->itr = vsi->rx_itr_setting;
2151 rxr->latency = IXL_AVE_LATENCY;
2152 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2153 vsi->tx_itr_setting);
2154 txr->itr = vsi->tx_itr_setting;
2155 txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_allocate_pci_resources - map the device's register BAR, record the
 * bus-space tag/handle/size in the osdep so rd32/wr32 work, then pick
 * the interrupt scheme via ixl_init_msix().
 * NOTE(review): the rid declaration/assignment and return are elided in
 * this listing.
 */
2161 ixl_allocate_pci_resources(struct ixl_pf *pf)
2164 device_t dev = pf->dev;
2167 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2170 if (!(pf->pci_mem)) {
2171 device_printf(dev,"Unable to allocate bus resource: memory\n");
2175 pf->osdep.mem_bus_space_tag =
2176 rman_get_bustag(pf->pci_mem);
2177 pf->osdep.mem_bus_space_handle =
2178 rman_get_bushandle(pf->pci_mem);
2179 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2180 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* hw_addr doubles as a pointer to the bus-space handle */
2181 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2183 pf->hw.back = &pf->osdep;
2186 ** Now setup MSI or MSI/X, should
2187 ** return us the number of supported
2188 ** vectors. (Will be 1 for MSI)
2190 pf->msix = ixl_init_msix(pf);
/*
 * ixl_free_pci_resources - tear down everything acquired in
 * ixl_allocate_pci_resources / ixl_assign_vsi_*: per-queue interrupt
 * handlers and IRQ resources, the adminq interrupt, any MSI/MSI-X
 * allocation, the MSI-X table BAR, and the register BAR.
 * NOTE(review): rid/memrid declarations, que->tag = NULL resets and
 * some braces are elided in this listing.
 */
2195 ixl_free_pci_resources(struct ixl_pf * pf)
2197 struct ixl_vsi *vsi = &pf->vsi;
2198 struct ixl_queue *que = vsi->queues;
2199 device_t dev = pf->dev;
2202 memrid = PCIR_BAR(IXL_BAR);
2204 /* We may get here before stations are setup */
2205 if ((!ixl_enable_msix) || (que == NULL))
2209 ** Release all msix VSI resources:
2211 for (int i = 0; i < vsi->num_queues; i++, que++) {
2212 rid = que->msix + 1;
2213 if (que->tag != NULL) {
2214 bus_teardown_intr(dev, que->res, que->tag);
2217 if (que->res != NULL)
2218 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2222 /* Clean the AdminQ interrupt last */
2223 if (pf->admvec) /* we are doing MSIX */
2224 rid = pf->admvec + 1;
2226 (pf->msix != 0) ? (rid = 1):(rid = 0);
2228 if (pf->tag != NULL) {
2229 bus_teardown_intr(dev, pf->res, pf->tag);
2232 if (pf->res != NULL)
2233 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2236 pci_release_msi(dev);
2238 if (pf->msix_mem != NULL)
2239 bus_release_resource(dev, SYS_RES_MEMORY,
2240 memrid, pf->msix_mem);
2242 if (pf->pci_mem != NULL)
2243 bus_release_resource(dev, SYS_RES_MEMORY,
2244 PCIR_BAR(0), pf->pci_mem);
2250 /*********************************************************************
2252 * Setup networking device structure and register an interface.
2254 **********************************************************************/
/*
 * ixl_setup_interface - allocate and initialize the ifnet: entry points,
 * capabilities (checksum offload, TSO, LRO, jumbo, VLAN), attaches to
 * the Ethernet layer, then queries the firmware for supported PHY types
 * and registers the matching ifmedia entries (autoselect by default).
 * NOTE(review): the ifp declaration, if_alloc NULL check branch and
 * error-path returns are elided in this listing.
 */
2256 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2259 struct i40e_hw *hw = vsi->hw;
2260 struct ixl_queue *que = vsi->queues;
2261 struct i40e_aq_get_phy_abilities_resp abilities_resp;
2262 enum i40e_status_code aq_error = 0;
2264 INIT_DEBUGOUT("ixl_setup_interface: begin");
2266 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2268 device_printf(dev, "can not allocate ifnet structure\n");
2271 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2272 ifp->if_mtu = ETHERMTU;
2273 ifp->if_baudrate = 4000000000; // ??
2274 ifp->if_init = ixl_init;
2275 ifp->if_softc = vsi;
2276 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2277 ifp->if_ioctl = ixl_ioctl;
2279 #if __FreeBSD_version >= 1100000
2280 if_setgetcounterfn(ifp, ixl_get_counter);
2283 ifp->if_transmit = ixl_mq_start;
2285 ifp->if_qflush = ixl_qflush;
2287 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2289 ether_ifattach(ifp, hw->mac.addr);
2291 vsi->max_frame_size =
2292 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2293 + ETHER_VLAN_ENCAP_LEN;
2296 * Tell the upper layer(s) we support long frames.
2298 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2300 ifp->if_capabilities |= IFCAP_HWCSUM;
2301 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2302 ifp->if_capabilities |= IFCAP_TSO;
2303 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2304 ifp->if_capabilities |= IFCAP_LRO;
2306 /* VLAN capabilities */
2307 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2310 | IFCAP_VLAN_HWCSUM;
2311 ifp->if_capenable = ifp->if_capabilities;
2314 ** Don't turn this on by default, if vlans are
2315 ** created on another pseudo device (eg. lagg)
2316 ** then vlan events are not passed thru, breaking
2317 ** operation, but with HW FILTER off it works. If
2318 ** using vlans directly on the ixl driver you can
2319 ** enable this and get full hardware tag filtering.
2321 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2324 * Specify the media types supported by this adapter and register
2325 * callbacks to update media and link information
2327 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2330 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2332 printf("Error getting supported media types, AQ error %d\n", aq_error);
2336 /* Display supported media types */
2337 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2338 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2340 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2341 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2343 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2344 abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2345 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2346 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2347 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2348 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2349 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2350 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2351 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2353 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2354 abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2355 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2356 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2357 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2358 if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2359 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2361 /* Use autoselect media by default */
2362 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2363 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
/*
 * ixl_config_link - refresh firmware link info and report whether link
 * is currently up (debug printf).
 * NOTE(review): the check declaration and return are elided; the
 * printf presumably sits under a debug guard — confirm upstream.
 */
2369 ixl_config_link(struct i40e_hw *hw)
2373 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2374 check = i40e_get_link_status(hw);
2376 printf("Link is %s\n", check ? "up":"down");
2381 /*********************************************************************
2383 * Initialize this VSI
2385 **********************************************************************/
/*
 * ixl_setup_vsi - initialize the VSI context: fetches the switch config
 * to learn the VSI's SEID, reads the current VSI parameters from
 * firmware, configures the contiguous queue/TC mapping and VLAN
 * stripping mode, resets statistics, and writes the updated parameters
 * back via the admin queue.
 * NOTE(review): the 'next' declaration, error-path returns, and debug
 * guards around the printfs are elided in this listing.
 */
2387 ixl_setup_vsi(struct ixl_vsi *vsi)
2389 struct i40e_hw *hw = vsi->hw;
2390 device_t dev = vsi->dev;
2391 struct i40e_aqc_get_switch_config_resp *sw_config;
2392 struct i40e_vsi_context ctxt;
2393 u8 aq_buf[I40E_AQ_LARGE_BUF];
2394 int ret = I40E_SUCCESS;
2397 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2398 ret = i40e_aq_get_switch_config(hw, sw_config,
2399 sizeof(aq_buf), &next, NULL);
2401 device_printf(dev,"aq_get_switch_config failed!!\n");
2405 printf("Switch config: header reported: %d in structure, %d total\n",
2406 sw_config->header.num_reported, sw_config->header.num_total);
2407 printf("type=%d seid=%d uplink=%d downlink=%d\n",
2408 sw_config->element[0].element_type,
2409 sw_config->element[0].seid,
2410 sw_config->element[0].uplink_seid,
2411 sw_config->element[0].downlink_seid);
2413 /* Save off this important value */
2414 vsi->seid = sw_config->element[0].seid;
2416 memset(&ctxt, 0, sizeof(ctxt));
2417 ctxt.seid = vsi->seid;
2418 ctxt.pf_num = hw->pf_id;
2419 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2421 device_printf(dev,"get vsi params failed %x!!\n", ret);
2425 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2426 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2427 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2428 ctxt.uplink_seid, ctxt.vsi_number,
2429 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2430 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2431 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2434 ** Set the queue and traffic class bits
2435 ** - when multiple traffic classes are supported
2436 ** this will need to be more robust.
2438 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2439 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2440 ctxt.info.queue_mapping[0] = 0;
2441 ctxt.info.tc_mapping[0] = 0x0800;
2443 /* Set VLAN receive stripping mode */
2444 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2445 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
/* Strip VLAN tags in hardware only if the stack asked for it */
2446 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2447 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2449 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2451 /* Keep copy of VSI info in VSI for statistic counters */
2452 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2454 /* Reset VSI statistics */
2455 ixl_vsi_reset_stats(vsi);
2456 vsi->hw_filters_add = 0;
2457 vsi->hw_filters_del = 0;
2459 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2461 device_printf(dev,"update vsi params failed %x!!\n",
2462 hw->aq.asq_last_status);
2467 /*********************************************************************
2469 * Initialize the VSI: this handles contexts, which means things
2470 * like the number of descriptors, buffer size,
2471 * plus we init the rings thru this function.
2473 **********************************************************************/
/*
 * ixl_initialize_vsi: program the HMC TX/RX queue contexts for every
 * queue of the VSI and (re)initialize the software rings.
 */
2475 ixl_initialize_vsi(struct ixl_vsi *vsi)
2477 struct ixl_queue *que = vsi->queues;
2478 device_t dev = vsi->dev;
2479 struct i40e_hw *hw = vsi->hw;
2483 for (int i = 0; i < vsi->num_queues; i++, que++) {
2484 struct tx_ring *txr = &que->txr;
2485 struct rx_ring *rxr = &que->rxr;
2486 struct i40e_hmc_obj_txq tctx;
2487 struct i40e_hmc_obj_rxq rctx;
2492 /* Setup the HMC TX Context */
2493 size = que->num_desc * sizeof(struct i40e_tx_desc);
2494 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2495 tctx.new_context = 1;
/* HMC base is expressed in 128-byte units, hence the /128 */
2496 tctx.base = (txr->dma.pa/128);
2497 tctx.qlen = que->num_desc;
2499 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2500 /* Enable HEAD writeback */
2501 tctx.head_wb_ena = 1;
/* Head writeback lands immediately after the descriptor ring */
2502 tctx.head_wb_addr = txr->dma.pa +
2503 (que->num_desc * sizeof(struct i40e_tx_desc));
2504 tctx.rdylist_act = 0;
2505 err = i40e_clear_lan_tx_queue_context(hw, i);
2507 device_printf(dev, "Unable to clear TX context\n");
2510 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2512 device_printf(dev, "Unable to set TX context\n");
2515 /* Associate the ring with this PF */
2516 txctl = I40E_QTX_CTL_PF_QUEUE;
2517 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2518 I40E_QTX_CTL_PF_INDX_MASK);
2519 wr32(hw, I40E_QTX_CTL(i), txctl);
2522 /* Do ring (re)init */
2523 ixl_init_tx_ring(que);
2525 /* Next setup the HMC RX Context */
/* Pick mbuf cluster size from the configured max frame size */
2526 if (vsi->max_frame_size <= 2048)
2527 rxr->mbuf_sz = MCLBYTES;
2529 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest receivable frame given the HW's RX buffer chain limit */
2531 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2533 /* Set up an RX context for the HMC */
2534 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2535 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2536 /* ignore header split for now */
2537 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
/* Clamp rxmax to what the buffer chain can actually hold */
2538 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2539 vsi->max_frame_size : max_rxmax;
2541 rctx.dsize = 1; /* do 32byte descriptors */
2542 rctx.hsplit_0 = 0; /* no HDR split initially */
2543 rctx.base = (rxr->dma.pa/128);
2544 rctx.qlen = que->num_desc;
2545 rctx.tphrdesc_ena = 1;
2546 rctx.tphwdesc_ena = 1;
2547 rctx.tphdata_ena = 0;
2548 rctx.tphhead_ena = 0;
2549 rctx.lrxqthresh = 2;
2556 err = i40e_clear_lan_rx_queue_context(hw, i);
2559 "Unable to clear RX context %d\n", i);
2562 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2564 device_printf(dev, "Unable to set RX context %d\n", i);
2567 err = ixl_init_rx_ring(que);
2569 device_printf(dev, "Fail in init_rx_ring %d\n", i);
/* Reset the RX tail, then advance it to the last usable slot */
2572 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2573 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2579 /*********************************************************************
2581 * Free all VSI structs.
2583 **********************************************************************/
/*
 * ixl_free_vsi: release all per-queue resources (soft structures,
 * descriptor DMA memory, locks) and drain the MAC filter list.
 */
2585 ixl_free_vsi(struct ixl_vsi *vsi)
2587 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2588 struct ixl_queue *que = vsi->queues;
2589 struct ixl_mac_filter *f;
2591 /* Free station queues */
2592 for (int i = 0; i < vsi->num_queues; i++, que++) {
2593 struct tx_ring *txr = &que->txr;
2594 struct rx_ring *rxr = &que->rxr;
/* Guard against partially-attached queues whose locks never init'd */
2596 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2599 ixl_free_que_tx(que);
2601 i40e_free_dma_mem(&pf->hw, &txr->dma);
2603 IXL_TX_LOCK_DESTROY(txr);
2605 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2608 ixl_free_que_rx(que);
2610 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2612 IXL_RX_LOCK_DESTROY(rxr);
2615 free(vsi->queues, M_DEVBUF);
2617 /* Free VSI filter list */
2618 while (!SLIST_EMPTY(&vsi->ftl)) {
2619 f = SLIST_FIRST(&vsi->ftl);
2620 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2626 /*********************************************************************
2628 * Allocate memory for the VSI (virtual station interface) and their
2629 * associated queues, rings and the descriptors associated with each,
2630 * called only once at attach.
2632 **********************************************************************/
/*
 * ixl_setup_stations: one-time (attach) allocation of the queue array
 * and, per queue, the TX/RX locks, descriptor DMA rings, soft
 * structures and the TX buf_ring.  On failure the partially-built
 * queues are torn down at the bottom.
 */
2634 ixl_setup_stations(struct ixl_pf *pf)
2636 device_t dev = pf->dev;
2637 struct ixl_vsi *vsi;
2638 struct ixl_queue *que;
2639 struct tx_ring *txr;
2640 struct rx_ring *rxr;
2642 int error = I40E_SUCCESS;
2645 vsi->back = (void *)pf;
2650 /* Get memory for the station queues */
2652 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2653 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2654 device_printf(dev, "Unable to allocate queue memory\n");
2659 for (int i = 0; i < vsi->num_queues; i++) {
2660 que = &vsi->queues[i];
2661 que->num_desc = ixl_ringsz;
2664 /* mark the queue as active */
2665 vsi->active_queues |= (u64)1 << que->me;
2668 txr->tail = I40E_QTX_TAIL(que->me);
2670 /* Initialize the TX lock */
2671 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2672 device_get_nameunit(dev), que->me);
2673 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2674 /* Create the TX descriptor ring */
/* Extra u32 holds the head-writeback word past the descriptors */
2675 tsize = roundup2((que->num_desc *
2676 sizeof(struct i40e_tx_desc)) +
2677 sizeof(u32), DBA_ALIGN);
2678 if (i40e_allocate_dma_mem(&pf->hw,
2679 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2681 "Unable to allocate TX Descriptor memory\n");
2685 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2686 bzero((void *)txr->base, tsize);
2687 /* Now allocate transmit soft structs for the ring */
2688 if (ixl_allocate_tx_data(que)) {
2690 "Critical Failure setting up TX structures\n");
2694 /* Allocate a buf ring */
2695 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2696 M_WAITOK, &txr->mtx);
2697 if (txr->br == NULL) {
2699 "Critical Failure setting up TX buf ring\n");
2705 * Next the RX queues...
2707 rsize = roundup2(que->num_desc *
2708 sizeof(union i40e_rx_desc), DBA_ALIGN);
2711 rxr->tail = I40E_QRX_TAIL(que->me);
2713 /* Initialize the RX side lock */
2714 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2715 device_get_nameunit(dev), que->me);
2716 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
/* RX ring uses a fixed 4096-byte alignment, unlike TX's DBA_ALIGN */
2718 if (i40e_allocate_dma_mem(&pf->hw,
2719 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2721 "Unable to allocate RX Descriptor memory\n");
2725 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2726 bzero((void *)rxr->base, rsize);
2728 /* Allocate receive soft structs for the ring*/
2729 if (ixl_allocate_rx_data(que)) {
2731 "Critical Failure setting up receive structs\n");
/* Error unwind: free any DMA rings allocated before the failure */
2740 for (int i = 0; i < vsi->num_queues; i++) {
2741 que = &vsi->queues[i];
2745 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2747 i40e_free_dma_mem(&pf->hw, &txr->dma);
2755 ** Provide an update to the queue RX
2756 ** interrupt moderation value.
/*
 * ixl_set_queue_rx_itr: adaptively retune this queue's RX interrupt
 * throttling.  In dynamic mode the latency class is stepped up/down
 * from the observed bytes-per-interrupt and the new ITR is blended
 * into the old one (exponential smoothing) before being written to
 * the hardware; in static mode the sysctl-configured value is used.
 */
2759 ixl_set_queue_rx_itr(struct ixl_queue *que)
2761 struct ixl_vsi *vsi = que->vsi;
2762 struct i40e_hw *hw = vsi->hw;
2763 struct rx_ring *rxr = &que->rxr;
2769 /* Idle, do nothing */
2770 if (rxr->bytes == 0)
2773 if (ixl_dynamic_rx_itr) {
/* Rough bytes-per-interrupt figure used to classify the load */
2774 rx_bytes = rxr->bytes/rxr->itr;
2777 /* Adjust latency range */
2778 switch (rxr->latency) {
2779 case IXL_LOW_LATENCY:
2780 if (rx_bytes > 10) {
2781 rx_latency = IXL_AVE_LATENCY;
2782 rx_itr = IXL_ITR_20K;
2785 case IXL_AVE_LATENCY:
2786 if (rx_bytes > 20) {
2787 rx_latency = IXL_BULK_LATENCY;
2788 rx_itr = IXL_ITR_8K;
2789 } else if (rx_bytes <= 10) {
2790 rx_latency = IXL_LOW_LATENCY;
2791 rx_itr = IXL_ITR_100K;
2794 case IXL_BULK_LATENCY:
2795 if (rx_bytes <= 20) {
2796 rx_latency = IXL_AVE_LATENCY;
2797 rx_itr = IXL_ITR_20K;
2802 rxr->latency = rx_latency;
2804 if (rx_itr != rxr->itr) {
2805 /* do an exponential smoothing */
2806 rx_itr = (10 * rx_itr * rxr->itr) /
2807 ((9 * rx_itr) + rxr->itr);
2808 rxr->itr = rx_itr & IXL_MAX_ITR;
2809 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2810 que->me), rxr->itr);
2812 } else { /* We may have toggled to non-dynamic */
2813 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2814 vsi->rx_itr_setting = ixl_rx_itr;
2815 /* Update the hardware if needed */
2816 if (rxr->itr != vsi->rx_itr_setting) {
2817 rxr->itr = vsi->rx_itr_setting;
2818 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2819 que->me), rxr->itr);
2829 ** Provide an update to the queue TX
2830 ** interrupt moderation value.
/*
 * ixl_set_queue_tx_itr: TX-side twin of ixl_set_queue_rx_itr —
 * adaptively retune this queue's TX interrupt throttling, or apply
 * the static sysctl value when dynamic mode is off.
 */
2833 ixl_set_queue_tx_itr(struct ixl_queue *que)
2835 struct ixl_vsi *vsi = que->vsi;
2836 struct i40e_hw *hw = vsi->hw;
2837 struct tx_ring *txr = &que->txr;
2843 /* Idle, do nothing */
2844 if (txr->bytes == 0)
2847 if (ixl_dynamic_tx_itr) {
/* Rough bytes-per-interrupt figure used to classify the load */
2848 tx_bytes = txr->bytes/txr->itr;
2851 switch (txr->latency) {
2852 case IXL_LOW_LATENCY:
2853 if (tx_bytes > 10) {
2854 tx_latency = IXL_AVE_LATENCY;
2855 tx_itr = IXL_ITR_20K;
2858 case IXL_AVE_LATENCY:
2859 if (tx_bytes > 20) {
2860 tx_latency = IXL_BULK_LATENCY;
2861 tx_itr = IXL_ITR_8K;
2862 } else if (tx_bytes <= 10) {
2863 tx_latency = IXL_LOW_LATENCY;
2864 tx_itr = IXL_ITR_100K;
2867 case IXL_BULK_LATENCY:
2868 if (tx_bytes <= 20) {
2869 tx_latency = IXL_AVE_LATENCY;
2870 tx_itr = IXL_ITR_20K;
2875 txr->latency = tx_latency;
2877 if (tx_itr != txr->itr) {
2878 /* do an exponential smoothing */
2879 tx_itr = (10 * tx_itr * txr->itr) /
2880 ((9 * tx_itr) + txr->itr);
2881 txr->itr = tx_itr & IXL_MAX_ITR;
2882 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2883 que->me), txr->itr);
2886 } else { /* We may have toggled to non-dynamic */
2887 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2888 vsi->tx_itr_setting = ixl_tx_itr;
2889 /* Update the hardware if needed */
2890 if (txr->itr != vsi->tx_itr_setting) {
2891 txr->itr = vsi->tx_itr_setting;
2892 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2893 que->me), txr->itr);
/*
 * ixl_add_hw_stats: build the device's sysctl statistics tree —
 * driver counters at the top level, a "vsi" node with ethernet
 * stats, one "queN" node per queue, and a MAC stats node.
 */
2903 ixl_add_hw_stats(struct ixl_pf *pf)
2905 device_t dev = pf->dev;
2906 struct ixl_vsi *vsi = &pf->vsi;
2907 struct ixl_queue *queues = vsi->queues;
2908 struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2909 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2911 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2912 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2913 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2915 struct sysctl_oid *vsi_node, *queue_node;
2916 struct sysctl_oid_list *vsi_list, *queue_list;
2918 struct tx_ring *txr;
2919 struct rx_ring *rxr;
2921 /* Driver statistics */
2922 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2923 CTLFLAG_RD, &pf->watchdog_events,
2924 "Watchdog timeouts");
2925 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2926 CTLFLAG_RD, &pf->admin_irq,
2927 "Admin Queue IRQ Handled");
2929 /* VSI statistics */
2930 #define QUEUE_NAME_LEN 32
2931 char queue_namebuf[QUEUE_NAME_LEN];
2933 // ERJ: Only one vsi now, re-do when >1 VSI enabled
2934 // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2935 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2936 CTLFLAG_RD, NULL, "VSI-specific stats");
2937 vsi_list = SYSCTL_CHILDREN(vsi_node);
2939 ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2941 /* Queue statistics */
2942 for (int q = 0; q < vsi->num_queues; q++) {
2943 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2944 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2945 CTLFLAG_RD, NULL, "Queue #");
2946 queue_list = SYSCTL_CHILDREN(queue_node);
2948 txr = &(queues[q].txr);
2949 rxr = &(queues[q].rxr);
2951 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2952 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2953 "m_defrag() failed");
2954 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2955 CTLFLAG_RD, &(queues[q].dropped_pkts),
2956 "Driver dropped packets");
2957 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2958 CTLFLAG_RD, &(queues[q].irqs),
2959 "irqs on this queue");
2960 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2961 CTLFLAG_RD, &(queues[q].tso),
2963 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2964 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2965 "Driver tx dma failure in xmit");
2966 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2967 CTLFLAG_RD, &(txr->no_desc),
2968 "Queue No Descriptor Available");
2969 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2970 CTLFLAG_RD, &(txr->total_packets),
2971 "Queue Packets Transmitted");
2972 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2973 CTLFLAG_RD, &(txr->tx_bytes),
2974 "Queue Bytes Transmitted");
2975 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2976 CTLFLAG_RD, &(rxr->rx_packets),
2977 "Queue Packets Received");
2978 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2979 CTLFLAG_RD, &(rxr->rx_bytes),
2980 "Queue Bytes Received");
2984 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_add_sysctls_eth_stats: register read-only sysctl leaves for each
 * field of an i40e_eth_stats block under the given parent list.  The
 * table is walked until the NULL-stat sentinel entry.
 */
2988 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2989 struct sysctl_oid_list *child,
2990 struct i40e_eth_stats *eth_stats)
2992 struct ixl_sysctl_info ctls[] =
2994 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2995 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2996 "Unicast Packets Received"},
2997 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2998 "Multicast Packets Received"},
2999 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3000 "Broadcast Packets Received"},
3001 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3002 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3003 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3004 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3005 "Multicast Packets Transmitted"},
3006 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3007 "Broadcast Packets Transmitted"},
3008 {&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3013 struct ixl_sysctl_info *entry = ctls;
3014 while (entry->stat != 0)
3016 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3017 CTLFLAG_RD, entry->stat,
3018 entry->description);
/*
 * ixl_add_sysctls_mac_stats: create the "mac" sysctl node, attach the
 * embedded ethernet stats under it, then register the port-level MAC
 * counters (errors, size histograms, flow control) from a sentinel-
 * terminated table.
 */
3024 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3025 struct sysctl_oid_list *child,
3026 struct i40e_hw_port_stats *stats)
3028 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3029 CTLFLAG_RD, NULL, "Mac Statistics");
3030 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3032 struct i40e_eth_stats *eth_stats = &stats->eth;
3033 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3035 struct ixl_sysctl_info ctls[] =
3037 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3038 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3039 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3040 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3041 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3042 /* Packet Reception Stats */
3043 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3044 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3045 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3046 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3047 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3048 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3049 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3050 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3051 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3052 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3053 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3054 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3055 /* Packet Transmission Stats */
3056 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3057 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3058 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3059 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3060 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3061 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3062 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3064 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3065 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3066 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3067 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3072 struct ixl_sysctl_info *entry = ctls;
3073 while (entry->stat != 0)
3075 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3076 CTLFLAG_RD, entry->stat,
3077 entry->description);
3083 ** ixl_config_rss - setup RSS
3084 ** - note this is done for the single vsi
/*
 * ixl_config_rss: program receive-side scaling for the single VSI —
 * write the (fixed) hash key, enable the desired packet-type hash
 * set in HENA, and fill the redirection LUT with the VSI's queues in
 * round-robin order, packing four 8-bit entries per register write.
 */
3086 static void ixl_config_rss(struct ixl_vsi *vsi)
3088 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3089 struct i40e_hw *hw = vsi->hw;
/* Fixed hash key; per-boot randomization is not attempted here */
3094 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3095 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3096 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3097 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3099 /* Fill out hash function seed */
3100 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3101 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3103 /* Enable PCTYPES for RSS: */
3105 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3106 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3107 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3108 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3109 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3110 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3111 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3112 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3113 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3114 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3115 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
/* OR into the existing HENA bits rather than overwrite them */
3117 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3118 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3120 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3121 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3123 /* Populate the LUT with max no. of queues in round robin fashion */
3124 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3125 if (j == vsi->num_queues)
3127 /* lut = 4-byte sliding window of 4 lut entries */
3128 lut = (lut << 8) | (j &
3129 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3130 /* On i = 3, we have 4 entries in lut; write to the register */
3132 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3139 ** This routine is run via a vlan config EVENT,
3140 ** it enables us to use the HW Filter table since
3141 ** we can get the vlan id. This just creates the
3142 ** entry in the soft version of the VFTA, init will
3143 ** repopulate the real table.
/*
 * ixl_register_vlan: vlan-config event handler.  Validates that the
 * event targets this interface and that the tag is in range, then
 * installs a MAC+VLAN filter for our own MAC address.
 */
3146 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3148 struct ixl_vsi *vsi = ifp->if_softc;
3149 struct i40e_hw *hw = vsi->hw;
3150 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3152 if (ifp->if_softc != arg) /* Not our event */
3155 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3160 ixl_add_filter(vsi, hw->mac.addr, vtag);
3165 ** This routine is run via a vlan
3166 ** unconfig EVENT, remove our entry
3167 ** in the soft vfta.
/*
 * ixl_unregister_vlan: vlan-unconfig event handler; mirror of
 * ixl_register_vlan — removes the MAC+VLAN filter for this tag.
 */
3170 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3172 struct ixl_vsi *vsi = ifp->if_softc;
3173 struct i40e_hw *hw = vsi->hw;
3174 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3176 if (ifp->if_softc != arg)
3179 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3184 ixl_del_filter(vsi, hw->mac.addr, vtag);
3189 ** This routine updates vlan filters, called by init
3190 ** it scans the filter table and then updates the hw
3191 ** after a soft reset.
/*
 * ixl_setup_vlan_filters: after a soft reset, re-mark every VLAN
 * entry in the soft filter table and push them to hardware in one
 * batched admin-queue call.
 */
3194 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3196 struct ixl_mac_filter *f;
/* Nothing to do when no VLANs are configured */
3199 if (vsi->num_vlans == 0)
3202 ** Scan the filter list for vlan entries,
3203 ** mark them for addition and then call
3204 ** for the AQ update.
3206 SLIST_FOREACH(f, &vsi->ftl, next) {
3207 if (f->flags & IXL_FILTER_VLAN) {
3215 printf("setup vlan: no filters found!\n");
3218 flags = IXL_FILTER_VLAN;
3219 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3220 ixl_add_hw_filters(vsi, flags, cnt);
3225 ** Initialize filter list and add filters that the hardware
3226 ** needs to know about.
/*
 * ixl_init_filters: seed the VSI filter table with the broadcast
 * address so broadcast frames are always accepted.
 */
3229 ixl_init_filters(struct ixl_vsi *vsi)
3231 /* Add broadcast address */
3232 u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3233 ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3237 ** This routine adds multicast filters
/*
 * ixl_add_mc_filter: add a multicast MAC to the soft filter table
 * (VLAN-agnostic).  No-op if a matching filter already exists; the
 * hardware update happens later in a batched call.
 */
3240 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3242 struct ixl_mac_filter *f;
3244 /* Does one already exist */
3245 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3249 f = ixl_get_filter(vsi);
3251 printf("WARNING: no filter available!!\n");
3254 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3255 f->vlan = IXL_VLAN_ANY;
3256 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3263 ** This routine adds macvlan filters
/*
 * ixl_add_filter: add a MAC+VLAN filter and immediately push it to
 * hardware.  When the first real VLAN is registered, the VLAN_ANY
 * ("no vlan") filter for this MAC is swapped for a vlan-0 filter.
 */
3266 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3268 struct ixl_mac_filter *f, *tmp;
3269 device_t dev = vsi->dev;
3271 DEBUGOUT("ixl_add_filter: begin");
3273 /* Does one already exist */
3274 f = ixl_find_filter(vsi, macaddr, vlan);
3278 ** Is this the first vlan being registered, if so we
3279 ** need to remove the ANY filter that indicates we are
3280 ** not in a vlan, and replace that with a 0 filter.
3282 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3283 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3285 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
/* Recursive call installs the replacement vlan-0 filter */
3286 ixl_add_filter(vsi, macaddr, 0);
3290 f = ixl_get_filter(vsi);
3292 device_printf(dev, "WARNING: no filter available!!\n");
3295 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3297 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3298 if (f->vlan != IXL_VLAN_ANY)
3299 f->flags |= IXL_FILTER_VLAN;
/* Push this single new filter to the hardware right away */
3301 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * ixl_del_filter: mark a MAC+VLAN filter for deletion and remove it
 * from hardware.  When the last VLAN disappears, the vlan-0 filter
 * is swapped back for the VLAN_ANY ("no vlan") filter.
 */
3306 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3308 struct ixl_mac_filter *f;
3310 f = ixl_find_filter(vsi, macaddr, vlan);
3314 f->flags |= IXL_FILTER_DEL;
3315 ixl_del_hw_filters(vsi, 1);
3317 /* Check if this is the last vlan removal */
3318 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3319 /* Switch back to a non-vlan filter */
3320 ixl_del_filter(vsi, macaddr, 0);
3321 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3327 ** Find the filter with both matching mac addr and vlan id
/*
 * ixl_find_filter: linear search of the VSI soft filter list for an
 * entry matching both the MAC address and the VLAN id; returns the
 * entry or NULL.
 */
3329 static struct ixl_mac_filter *
3330 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3332 struct ixl_mac_filter *f;
3335 SLIST_FOREACH(f, &vsi->ftl, next) {
3336 if (!cmp_etheraddr(f->macaddr, macaddr))
3338 if (f->vlan == vlan) {
3350 ** This routine takes additions to the vsi filter
3351 ** table and creates an Admin Queue call to create
3352 ** the filters in the hardware.
/*
 * ixl_add_hw_filters: gather up to 'cnt' soft-table entries whose
 * flags exactly match 'flags' into an admin-queue element array and
 * issue a single add-macvlan command, clearing each entry's ADD flag
 * as it is queued.
 */
3355 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3357 struct i40e_aqc_add_macvlan_element_data *a, *b;
3358 struct ixl_mac_filter *f;
3359 struct i40e_hw *hw = vsi->hw;
3360 device_t dev = vsi->dev;
3363 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3364 M_DEVBUF, M_NOWAIT | M_ZERO);
3366 device_printf(dev, "add hw filter failed to get memory\n");
3371 ** Scan the filter list, each time we find one
3372 ** we add it to the admin queue array and turn off
3375 SLIST_FOREACH(f, &vsi->ftl, next) {
3376 if (f->flags == flags) {
3377 b = &a[j]; // a pox on fvl long names :)
3378 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* VLAN_ANY maps to tag 0 in the AQ element */
3380 (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3381 b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3382 f->flags &= ~IXL_FILTER_ADD;
3389 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3391 device_printf(dev, "aq_add_macvlan failure %d\n",
3392 hw->aq.asq_last_status);
3394 vsi->hw_filters_add += j;
3401 ** This routine takes removals in the vsi filter
3402 ** table and creates an Admin Queue call to delete
3403 ** the filters in the hardware.
/*
 * ixl_del_hw_filters: collect up to 'cnt' soft-table entries marked
 * IXL_FILTER_DEL into an admin-queue array, unlink them from the
 * list, and issue a single remove-macvlan command.  ENOENT from the
 * AQ is deliberately tolerated (see note below); on other errors the
 * per-element error codes are used to count the actual removals.
 */
3406 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3408 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3409 struct i40e_hw *hw = vsi->hw;
3410 device_t dev = vsi->dev;
3411 struct ixl_mac_filter *f, *f_temp;
3414 DEBUGOUT("ixl_del_hw_filters: begin\n");
3416 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3417 M_DEVBUF, M_NOWAIT | M_ZERO);
3419 printf("del hw filter failed to get memory\n");
/* SAFE variant: entries are unlinked while iterating */
3423 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3424 if (f->flags & IXL_FILTER_DEL) {
3425 e = &d[j]; // a pox on fvl long names :)
3426 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3427 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3428 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3429 /* delete entry from vsi list */
3430 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3438 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3439 /* NOTE: returns ENOENT every time but seems to work fine,
3440 so we'll ignore that specific error. */
3441 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3443 for (int i = 0; i < j; i++)
3444 sc += (!d[i].error_code);
3445 vsi->hw_filters_del += sc;
3447 "Failed to remove %d/%d filters, aq error %d\n",
3448 j - sc, j, hw->aq.asq_last_status);
3450 vsi->hw_filters_del += j;
3454 DEBUGOUT("ixl_del_hw_filters: end\n");
/*
 * ixl_enable_rings: request TX and RX queue enable for every queue
 * of the VSI, polling each queue's STAT bit (up to 10 x 10ms) to
 * confirm the hardware actually enabled it.
 */
3460 ixl_enable_rings(struct ixl_vsi *vsi)
3462 struct i40e_hw *hw = vsi->hw;
3465 for (int i = 0; i < vsi->num_queues; i++) {
3466 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3468 reg = rd32(hw, I40E_QTX_ENA(i));
3469 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3470 I40E_QTX_ENA_QENA_STAT_MASK;
3471 wr32(hw, I40E_QTX_ENA(i), reg);
3472 /* Verify the enable took */
3473 for (int j = 0; j < 10; j++) {
3474 reg = rd32(hw, I40E_QTX_ENA(i));
3475 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3477 i40e_msec_delay(10);
3479 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3480 printf("TX queue %d disabled!\n", i);
/* Same request/verify sequence for the RX side */
3482 reg = rd32(hw, I40E_QRX_ENA(i));
3483 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3484 I40E_QRX_ENA_QENA_STAT_MASK;
3485 wr32(hw, I40E_QRX_ENA(i), reg);
3486 /* Verify the enable took */
3487 for (int j = 0; j < 10; j++) {
3488 reg = rd32(hw, I40E_QRX_ENA(i));
3489 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3491 i40e_msec_delay(10);
3493 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3494 printf("RX queue %d disabled!\n", i);
/*
 * ixl_disable_rings: mirror of ixl_enable_rings — clear the enable
 * request for every TX/RX queue and poll (up to 10 x 10ms each) for
 * the STAT bit to drop, warning if a queue stays enabled.
 */
3499 ixl_disable_rings(struct ixl_vsi *vsi)
3501 struct i40e_hw *hw = vsi->hw;
3504 for (int i = 0; i < vsi->num_queues; i++) {
3505 i40e_pre_tx_queue_cfg(hw, i, FALSE);
/* Brief settle time before touching the enable register */
3506 i40e_usec_delay(500);
3508 reg = rd32(hw, I40E_QTX_ENA(i));
3509 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3510 wr32(hw, I40E_QTX_ENA(i), reg);
3511 /* Verify the disable took */
3512 for (int j = 0; j < 10; j++) {
3513 reg = rd32(hw, I40E_QTX_ENA(i));
3514 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3516 i40e_msec_delay(10);
3518 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3519 printf("TX queue %d still enabled!\n", i);
3521 reg = rd32(hw, I40E_QRX_ENA(i));
3522 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3523 wr32(hw, I40E_QRX_ENA(i), reg);
3524 /* Verify the disable took */
3525 for (int j = 0; j < 10; j++) {
3526 reg = rd32(hw, I40E_QRX_ENA(i));
3527 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3529 i40e_msec_delay(10);
3531 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3532 printf("RX queue %d still enabled!\n", i);
3537 * ixl_handle_mdd_event
3539 * Called from interrupt handler to identify possibly malicious vfs
3540 * (But also detects events from the PF, as well)
/*
 * ixl_handle_mdd_event: interrupt-time handler for Malicious Driver
 * Detection.  Reads the global TX/RX MDD registers to identify the
 * offending function/queue, logs it, clears the event (write-1-to-
 * clear with all-ones), checks whether this PF itself triggered it,
 * and finally re-arms the MDD interrupt cause.
 */
3542 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3544 struct i40e_hw *hw = &pf->hw;
3545 device_t dev = pf->dev;
3546 bool mdd_detected = false;
3547 bool pf_mdd_detected = false;
3550 /* find what triggered the MDD event */
3551 reg = rd32(hw, I40E_GL_MDET_TX);
3552 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3553 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3554 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3555 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3556 I40E_GL_MDET_TX_EVENT_SHIFT;
3557 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3558 I40E_GL_MDET_TX_QUEUE_SHIFT;
3560 "Malicious Driver Detection event 0x%02x"
3561 " on TX queue %d pf number 0x%02x\n",
3562 event, queue, pf_num);
3563 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3564 mdd_detected = true;
3566 reg = rd32(hw, I40E_GL_MDET_RX);
3567 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3568 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3569 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3570 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3571 I40E_GL_MDET_RX_EVENT_SHIFT;
3572 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3573 I40E_GL_MDET_RX_QUEUE_SHIFT;
3575 "Malicious Driver Detection event 0x%02x"
3576 " on RX queue %d of function 0x%02x\n",
3577 event, queue, func);
3578 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3579 mdd_detected = true;
/* PF-scoped MDD registers: was this function the culprit? */
3583 reg = rd32(hw, I40E_PF_MDET_TX);
3584 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3585 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3587 "MDD TX event is for this function 0x%08x",
3589 pf_mdd_detected = true;
3591 reg = rd32(hw, I40E_PF_MDET_RX);
3592 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3593 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3595 "MDD RX event is for this function 0x%08x",
3597 pf_mdd_detected = true;
3601 /* re-enable mdd interrupt cause */
3602 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3603 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3604 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_enable_intr: enable interrupts for the VSI — in MSIX mode the
 * admin queue vector plus one vector per queue; otherwise the single
 * legacy interrupt.
 */
3609 ixl_enable_intr(struct ixl_vsi *vsi)
3611 struct i40e_hw *hw = vsi->hw;
3612 struct ixl_queue *que = vsi->queues;
3614 if (ixl_enable_msix) {
3615 ixl_enable_adminq(hw);
3616 for (int i = 0; i < vsi->num_queues; i++, que++)
3617 ixl_enable_queue(hw, que->me);
3619 ixl_enable_legacy(hw);
/*
 * ixl_disable_intr: mirror of ixl_enable_intr — mask the admin queue
 * and per-queue vectors (MSIX) or the legacy interrupt.
 */
3623 ixl_disable_intr(struct ixl_vsi *vsi)
3625 struct i40e_hw *hw = vsi->hw;
3626 struct ixl_queue *que = vsi->queues;
3628 if (ixl_enable_msix) {
3629 ixl_disable_adminq(hw);
3630 for (int i = 0; i < vsi->num_queues; i++, que++)
3631 ixl_disable_queue(hw, que->me);
3633 ixl_disable_legacy(hw);
/*
 * ixl_enable_adminq: unmask the admin-queue interrupt (vector 0) by
 * setting INTENA/CLEARPBA with the no-ITR index in DYN_CTL0.
 */
3637 ixl_enable_adminq(struct i40e_hw *hw)
3641 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3642 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3643 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3644 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_disable_adminq: mask the admin-queue interrupt by writing
 * DYN_CTL0 without the INTENA bit.
 */
3650 ixl_disable_adminq(struct i40e_hw *hw)
3654 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3655 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_enable_queue: unmask the MSIX interrupt for one queue vector
 * via its DYN_CTLN register.
 */
3661 ixl_enable_queue(struct i40e_hw *hw, int id)
3665 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3666 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3667 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3668 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
** ixl_disable_queue - mask the per-queue interrupt for queue 'id'
** (PFINT_DYN_CTLN written without INTENA).
*/
3672 ixl_disable_queue(struct i40e_hw *hw, int id)
3676 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3677 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
** ixl_enable_legacy - enable the shared legacy/MSI interrupt cause.
** Identical register programming to ixl_enable_adminq().
*/
3683 ixl_enable_legacy(struct i40e_hw *hw)
3686 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3687 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3688 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3689 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** ixl_disable_legacy - mask the shared legacy/MSI interrupt cause.
** Identical register programming to ixl_disable_adminq().
*/
3693 ixl_disable_legacy(struct i40e_hw *hw)
3697 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3698 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** ixl_update_stats_counters - refresh all per-port HW statistics.
**
** Reads every GLPRT_* port counter into pf->stats ('nsd'), using
** pf->stats_offsets ('osd') as the baseline captured on the first
** read after load (the device counters are not cleared by PFReset;
** see ixl_stat_update32/48). Once all counters are sampled,
** stat_offsets_loaded is latched true so subsequent calls subtract
** the saved offsets. Finishes by refreshing the PF VSI's ethernet
** stats and the interface error count.
*/
3704 ixl_update_stats_counters(struct ixl_pf *pf)
3706 struct i40e_hw *hw = &pf->hw;
3707 struct ixl_vsi *vsi = &pf->vsi;
3709 struct i40e_hw_port_stats *nsd = &pf->stats;
3710 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3712 /* Update hw stats */
3713 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3714 pf->stat_offsets_loaded,
3715 &osd->crc_errors, &nsd->crc_errors);
3716 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3717 pf->stat_offsets_loaded,
3718 &osd->illegal_bytes, &nsd->illegal_bytes);
3719 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3720 I40E_GLPRT_GORCL(hw->port),
3721 pf->stat_offsets_loaded,
3722 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3723 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3724 I40E_GLPRT_GOTCL(hw->port),
3725 pf->stat_offsets_loaded,
3726 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3727 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3728 pf->stat_offsets_loaded,
3729 &osd->eth.rx_discards,
3730 &nsd->eth.rx_discards);
3731 ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
3732 pf->stat_offsets_loaded,
3733 &osd->eth.tx_discards,
3734 &nsd->eth.tx_discards);
3735 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3736 I40E_GLPRT_UPRCL(hw->port),
3737 pf->stat_offsets_loaded,
3738 &osd->eth.rx_unicast,
3739 &nsd->eth.rx_unicast);
3740 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3741 I40E_GLPRT_UPTCL(hw->port),
3742 pf->stat_offsets_loaded,
3743 &osd->eth.tx_unicast,
3744 &nsd->eth.tx_unicast);
3745 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3746 I40E_GLPRT_MPRCL(hw->port),
3747 pf->stat_offsets_loaded,
3748 &osd->eth.rx_multicast,
3749 &nsd->eth.rx_multicast);
3750 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3751 I40E_GLPRT_MPTCL(hw->port),
3752 pf->stat_offsets_loaded,
3753 &osd->eth.tx_multicast,
3754 &nsd->eth.tx_multicast);
3755 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3756 I40E_GLPRT_BPRCL(hw->port),
3757 pf->stat_offsets_loaded,
3758 &osd->eth.rx_broadcast,
3759 &nsd->eth.rx_broadcast);
3760 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3761 I40E_GLPRT_BPTCL(hw->port),
3762 pf->stat_offsets_loaded,
3763 &osd->eth.tx_broadcast,
3764 &nsd->eth.tx_broadcast);
/* Link-level fault / drop counters */
3766 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3767 pf->stat_offsets_loaded,
3768 &osd->tx_dropped_link_down,
3769 &nsd->tx_dropped_link_down);
3770 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3771 pf->stat_offsets_loaded,
3772 &osd->mac_local_faults,
3773 &nsd->mac_local_faults);
3774 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3775 pf->stat_offsets_loaded,
3776 &osd->mac_remote_faults,
3777 &nsd->mac_remote_faults);
3778 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3779 pf->stat_offsets_loaded,
3780 &osd->rx_length_errors,
3781 &nsd->rx_length_errors);
3783 /* Flow control (LFC) stats */
3784 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3785 pf->stat_offsets_loaded,
3786 &osd->link_xon_rx, &nsd->link_xon_rx);
3787 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3788 pf->stat_offsets_loaded,
3789 &osd->link_xon_tx, &nsd->link_xon_tx);
3790 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3791 pf->stat_offsets_loaded,
3792 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3793 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3794 pf->stat_offsets_loaded,
3795 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3797 /* Priority flow control stats */
/* One set of PFC counters per traffic class (8 TCs) */
3799 for (int i = 0; i < 8; i++) {
3800 ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3801 pf->stat_offsets_loaded,
3802 &osd->priority_xon_rx[i],
3803 &nsd->priority_xon_rx[i]);
3804 ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3805 pf->stat_offsets_loaded,
3806 &osd->priority_xon_tx[i],
3807 &nsd->priority_xon_tx[i]);
3808 ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3809 pf->stat_offsets_loaded,
3810 &osd->priority_xoff_tx[i],
3811 &nsd->priority_xoff_tx[i]);
3812 ixl_stat_update32(hw,
3813 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3814 pf->stat_offsets_loaded,
3815 &osd->priority_xon_2_xoff[i],
3816 &nsd->priority_xon_2_xoff[i]);
3820 /* Packet size stats rx */
3821 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3822 I40E_GLPRT_PRC64L(hw->port),
3823 pf->stat_offsets_loaded,
3824 &osd->rx_size_64, &nsd->rx_size_64);
3825 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3826 I40E_GLPRT_PRC127L(hw->port),
3827 pf->stat_offsets_loaded,
3828 &osd->rx_size_127, &nsd->rx_size_127);
3829 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3830 I40E_GLPRT_PRC255L(hw->port),
3831 pf->stat_offsets_loaded,
3832 &osd->rx_size_255, &nsd->rx_size_255);
3833 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3834 I40E_GLPRT_PRC511L(hw->port),
3835 pf->stat_offsets_loaded,
3836 &osd->rx_size_511, &nsd->rx_size_511);
3837 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3838 I40E_GLPRT_PRC1023L(hw->port),
3839 pf->stat_offsets_loaded,
3840 &osd->rx_size_1023, &nsd->rx_size_1023);
3841 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3842 I40E_GLPRT_PRC1522L(hw->port),
3843 pf->stat_offsets_loaded,
3844 &osd->rx_size_1522, &nsd->rx_size_1522);
3845 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3846 I40E_GLPRT_PRC9522L(hw->port),
3847 pf->stat_offsets_loaded,
3848 &osd->rx_size_big, &nsd->rx_size_big);
3850 /* Packet size stats tx */
3851 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3852 I40E_GLPRT_PTC64L(hw->port),
3853 pf->stat_offsets_loaded,
3854 &osd->tx_size_64, &nsd->tx_size_64);
3855 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3856 I40E_GLPRT_PTC127L(hw->port),
3857 pf->stat_offsets_loaded,
3858 &osd->tx_size_127, &nsd->tx_size_127);
3859 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3860 I40E_GLPRT_PTC255L(hw->port),
3861 pf->stat_offsets_loaded,
3862 &osd->tx_size_255, &nsd->tx_size_255);
3863 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3864 I40E_GLPRT_PTC511L(hw->port),
3865 pf->stat_offsets_loaded,
3866 &osd->tx_size_511, &nsd->tx_size_511);
3867 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3868 I40E_GLPRT_PTC1023L(hw->port),
3869 pf->stat_offsets_loaded,
3870 &osd->tx_size_1023, &nsd->tx_size_1023);
3871 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3872 I40E_GLPRT_PTC1522L(hw->port),
3873 pf->stat_offsets_loaded,
3874 &osd->tx_size_1522, &nsd->tx_size_1522);
3875 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3876 I40E_GLPRT_PTC9522L(hw->port),
3877 pf->stat_offsets_loaded,
3878 &osd->tx_size_big, &nsd->tx_size_big);
/* Receive error classes */
3880 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3881 pf->stat_offsets_loaded,
3882 &osd->rx_undersize, &nsd->rx_undersize);
3883 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3884 pf->stat_offsets_loaded,
3885 &osd->rx_fragments, &nsd->rx_fragments);
3886 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3887 pf->stat_offsets_loaded,
3888 &osd->rx_oversize, &nsd->rx_oversize);
3889 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3890 pf->stat_offsets_loaded,
3891 &osd->rx_jabber, &nsd->rx_jabber);
/* From now on the saved offsets are subtracted from raw reads */
3892 pf->stat_offsets_loaded = true;
3895 /* Update vsi stats */
3896 ixl_update_eth_stats(vsi);
3899 // ERJ - these are per-port, update all vsis?
3900 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
3904 ** Tasklet handler for MSIX Adminq interrupts
3905 ** - do outside interrupt since it might sleep
/*
** Drains the admin receive queue (up to IXL_ADM_LIMIT events per
** invocation), dispatching on the event opcode, then re-enables the
** adminq interrupt cause in PFINT_ICR0_ENA and re-arms interrupts.
** The event buffer is malloc'd M_NOWAIT; on allocation failure the
** handler bails out (extraction dropped the early-return line).
*/
3908 ixl_do_adminq(void *context, int pending)
3910 struct ixl_pf *pf = context;
3911 struct i40e_hw *hw = &pf->hw;
3912 struct ixl_vsi *vsi = &pf->vsi;
3913 struct i40e_arq_event_info event;
3918 event.msg_len = IXL_AQ_BUF_SZ;
3919 event.msg_buf = malloc(event.msg_len,
3920 M_DEVBUF, M_NOWAIT | M_ZERO);
3921 if (!event.msg_buf) {
3922 printf("Unable to allocate adminq memory\n");
3926 /* clean and process any events */
3928 ret = i40e_clean_arq_element(hw, &event, &result);
3931 opcode = LE16_TO_CPU(event.desc.opcode);
3933 case i40e_aqc_opc_get_link_status:
/* Firmware reports a link change: re-query and propagate */
3934 vsi->link_up = ixl_config_link(hw);
3935 ixl_update_link_status(pf);
3937 case i40e_aqc_opc_send_msg_to_pf:
3938 /* process pf/vf communication here */
3940 case i40e_aqc_opc_event_lan_overflow:
3944 printf("AdminQ unknown event %x\n", opcode);
3949 } while (result && (loop++ < IXL_ADM_LIMIT));
/* re-enable the adminq interrupt cause that the ISR masked */
3951 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3952 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3953 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3954 free(event.msg_buf, M_DEVBUF);
3957 ixl_enable_adminq(&pf->hw);
3959 ixl_enable_intr(vsi);
/*
** ixl_debug_info - sysctl handler: any write to the node triggers a
** dump of driver debug state via ixl_print_debug_info(). Reads
** (req->newptr == NULL) and handler errors return without printing.
*/
3963 ixl_debug_info(SYSCTL_HANDLER_ARGS)
3966 int error, input = 0;
3968 error = sysctl_handle_int(oidp, &input, 0, req);
3970 if (error || !req->newptr)
3974 pf = (struct ixl_pf *)arg1;
3975 ixl_print_debug_info(pf);
/*
** ixl_print_debug_info - dump debug counters for the FIRST queue of
** the PF VSI plus selected VSI/port statistics registers.
**
** NOTE(review): the GLV_* reads use hard-coded stat index 0xc —
** presumably the PF VSI's stat_counter_idx on the test system;
** verify before relying on these values.
*/
3982 ixl_print_debug_info(struct ixl_pf *pf)
3984 struct i40e_hw *hw = &pf->hw;
3985 struct ixl_vsi *vsi = &pf->vsi;
3986 struct ixl_queue *que = vsi->queues;
3987 struct rx_ring *rxr = &que->rxr;
3988 struct tx_ring *txr = &que->txr;
3992 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
3993 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
3994 printf("RX next check = %x\n", rxr->next_check);
3995 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
3996 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
3997 printf("TX desc avail = %x\n", txr->avail);
3999 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4000 printf("RX Bytes = %x\n", reg);
4001 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4002 printf("Port RX Bytes = %x\n", reg);
4003 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4004 printf("RX discard = %x\n", reg);
4005 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4006 printf("Port RX discard = %x\n", reg);
4008 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4009 printf("TX errors = %x\n", reg);
4010 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4011 printf("TX Bytes = %x\n", reg);
4013 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4014 printf("RX undersize = %x\n", reg);
4015 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4016 printf("RX fragments = %x\n", reg);
4017 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4018 printf("RX oversize = %x\n", reg);
4019 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4020 printf("RX length error = %x\n", reg);
4021 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4022 printf("mac remote fault = %x\n", reg);
4023 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4024 printf("mac local fault = %x\n", reg);
4028 * Update VSI-specific ethernet statistics counters.
/*
** Samples the GLV_* per-VSI counters (indexed by the VSI's
** stat_counter_idx) into vsi->eth_stats, offset-corrected against
** vsi->eth_stats_offsets, then pushes the results into the ifnet
** counters via the IXL_SET_* macros.
**
** NOTE(review): 'nsd' is read below (tx_dropped_link_down) but its
** assignment line is missing from this extraction — presumably
** nsd = &pf->stats; confirm against the full source.
*/
4030 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4032 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4033 struct i40e_hw *hw = &pf->hw;
4034 struct i40e_eth_stats *es;
4035 struct i40e_eth_stats *oes;
4037 uint64_t tx_discards;
4038 struct i40e_hw_port_stats *nsd;
4039 u16 stat_idx = vsi->info.stat_counter_idx;
4041 es = &vsi->eth_stats;
4042 oes = &vsi->eth_stats_offsets;
4045 /* Gather up the stats that the hw collects */
4046 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4047 vsi->stat_offsets_loaded,
4048 &oes->tx_errors, &es->tx_errors);
4049 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4050 vsi->stat_offsets_loaded,
4051 &oes->rx_discards, &es->rx_discards);
4053 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4054 I40E_GLV_GORCL(stat_idx),
4055 vsi->stat_offsets_loaded,
4056 &oes->rx_bytes, &es->rx_bytes);
4057 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4058 I40E_GLV_UPRCL(stat_idx),
4059 vsi->stat_offsets_loaded,
4060 &oes->rx_unicast, &es->rx_unicast);
4061 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4062 I40E_GLV_MPRCL(stat_idx),
4063 vsi->stat_offsets_loaded,
4064 &oes->rx_multicast, &es->rx_multicast);
4065 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4066 I40E_GLV_BPRCL(stat_idx),
4067 vsi->stat_offsets_loaded,
4068 &oes->rx_broadcast, &es->rx_broadcast);
4070 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4071 I40E_GLV_GOTCL(stat_idx),
4072 vsi->stat_offsets_loaded,
4073 &oes->tx_bytes, &es->tx_bytes);
4074 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4075 I40E_GLV_UPTCL(stat_idx),
4076 vsi->stat_offsets_loaded,
4077 &oes->tx_unicast, &es->tx_unicast);
4078 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4079 I40E_GLV_MPTCL(stat_idx),
4080 vsi->stat_offsets_loaded,
4081 &oes->tx_multicast, &es->tx_multicast);
4082 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4083 I40E_GLV_BPTCL(stat_idx),
4084 vsi->stat_offsets_loaded,
4085 &oes->tx_broadcast, &es->tx_broadcast);
4086 vsi->stat_offsets_loaded = true;
/* Fold in software drops from each TX ring's buf_ring */
4088 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4089 for (i = 0; i < vsi->num_queues; i++)
4090 tx_discards += vsi->queues[i].txr.br->br_drops;
4092 /* Update ifnet stats */
4093 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4096 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4099 IXL_SET_IBYTES(vsi, es->rx_bytes);
4100 IXL_SET_OBYTES(vsi, es->tx_bytes);
4101 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4102 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4104 IXL_SET_OERRORS(vsi, es->tx_errors);
4105 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4106 IXL_SET_OQDROPS(vsi, tx_discards);
4107 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4108 IXL_SET_COLLISIONS(vsi, 0);
4112 * Reset all of the stats for the given pf
/*
** Zeroes both the accumulated port stats and their offsets and clears
** stat_offsets_loaded, so the next update re-baselines the counters.
*/
4114 void ixl_pf_reset_stats(struct ixl_pf *pf)
4116 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4117 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4118 pf->stat_offsets_loaded = false;
4122 * Resets all stats of the given vsi
/*
** VSI-level counterpart of ixl_pf_reset_stats(): zeroes the ethernet
** stats and offsets and forces a re-baseline on the next update.
*/
4124 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4126 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4127 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4128 vsi->stat_offsets_loaded = false;
4132 * Read and update a 48 bit stat from the hw
4134 * Since the device stats are not reset at PFReset, they likely will not
4135 * be zeroed when the driver starts. We'll save the first values read
4136 * and use them as offsets to be subtracted from the raw values in order
4137 * to report stats that count from zero.
/*
** On amd64 FreeBSD 10+ a single 64-bit read is used; elsewhere two
** 32-bit reads are combined (high register holds only 16 valid bits).
** The final branch handles 48-bit counter wrap relative to *offset.
** NOTE(review): the "offset not yet loaded" path (saving *offset on
** first read) is missing from this extraction; code kept byte-identical.
*/
4140 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4141 bool offset_loaded, u64 *offset, u64 *stat)
4145 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4146 new_data = rd64(hw, loreg);
4149 * Use two rd32's instead of one rd64; FreeBSD versions before
4150 * 10 don't support 8 byte bus reads/writes.
4152 new_data = rd32(hw, loreg);
4153 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4158 if (new_data >= *offset)
4159 *stat = new_data - *offset;
/* counter wrapped: add 2^48 before subtracting the baseline */
4161 *stat = (new_data + ((u64)1 << 48)) - *offset;
4162 *stat &= 0xFFFFFFFFFFFFULL;
4166 * Read and update a 32 bit stat from the hw
/*
** 32-bit analogue of ixl_stat_update48(): reads the register and
** reports the offset-corrected value, compensating for 32-bit wrap.
*/
4169 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4170 bool offset_loaded, u64 *offset, u64 *stat)
4174 new_data = rd32(hw, reg);
4177 if (new_data >= *offset)
4178 *stat = (u32)(new_data - *offset);
/* counter wrapped: add 2^32 before subtracting the baseline */
4180 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4184 ** Set flow control using sysctl:
/*
** Sysctl handler: reads the requested flow-control mode (0-3, i.e.
** none/rx/tx/full), validates it, rejects 40GBase-CR4 media (mode
** change unsupported there), then programs the mode via i40e_set_fc()
** and cross-checks that current_mode caught up with requested_mode.
*/
4191 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4194 * TODO: ensure flow control is disabled if
4195 * priority flow control is enabled
4197 * TODO: ensure tx CRC by hardware should be enabled
4198 * if tx flow control is enabled.
4200 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4201 struct i40e_hw *hw = &pf->hw;
4202 device_t dev = pf->dev;
4203 int requested_fc = 0, error = 0;
4204 enum i40e_status_code aq_error = 0;
/* Refresh link info so hw->fc.current_mode is up to date */
4207 aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4210 "%s: Error retrieving link info from aq, %d\n",
4211 __func__, aq_error);
4215 /* Read in new mode */
4216 requested_fc = hw->fc.current_mode;
4217 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4218 if ((error) || (req->newptr == NULL))
4220 if (requested_fc < 0 || requested_fc > 3) {
4222 "Invalid fc mode; valid modes are 0 through 3\n");
4227 ** Changing flow control mode currently does not work on
4230 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4231 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4232 device_printf(dev, "Changing flow control mode unsupported"
4233 " on 40GBase-CR4 media.\n");
4237 /* Set fc ability for port */
4238 hw->fc.requested_mode = requested_fc;
4239 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4242 "%s: Error setting new fc mode %d; fc_err %#x\n",
4243 __func__, aq_error, fc_aq_err);
/* Sanity check: firmware may silently refuse the new mode */
4247 if (hw->fc.current_mode != hw->fc.requested_mode) {
4248 device_printf(dev, "%s: FC set failure:\n", __func__);
4249 device_printf(dev, "%s: Current: %s / Requested: %s\n",
4251 ixl_fc_string[hw->fc.current_mode],
4252 ixl_fc_string[hw->fc.requested_mode]);
/*
** ixl_current_speed - read-only sysctl reporting the current link
** speed as a string. Refreshes link status first, then maps the
** i40e link_speed enum to an index into the local speeds[] table
** (table definition dropped by extraction).
*/
4259 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4261 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4262 struct i40e_hw *hw = &pf->hw;
4263 int error = 0, index = 0;
4274 ixl_update_link_status(pf);
4276 switch (hw->phy.link_info.link_speed) {
4277 case I40E_LINK_SPEED_100MB:
4280 case I40E_LINK_SPEED_1GB:
4283 case I40E_LINK_SPEED_10GB:
4286 case I40E_LINK_SPEED_40GB:
4289 case I40E_LINK_SPEED_20GB:
4292 case I40E_LINK_SPEED_UNKNOWN:
4298 error = sysctl_handle_string(oidp, speeds[index],
4299 strlen(speeds[index]), req);
4304 ** Control link advertise speed:
4306 ** 0x1 - advertise 100 Mb
4307 ** 0x2 - advertise 1G
4308 ** 0x4 - advertise 10G
4310 ** Does not work on 40G devices.
/*
** Sysctl handler: accepts a bitmask (0x1-0x7) of speeds to
** advertise, fetches current PHY capabilities, builds a new PHY
** config preserving abilities/EEE/power settings with only
** link_speed changed, applies it via the admin queue, and records
** the new mask in pf->advertised_speed.
*/
4313 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4315 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4316 struct i40e_hw *hw = &pf->hw;
4317 device_t dev = pf->dev;
4318 struct i40e_aq_get_phy_abilities_resp abilities;
4319 struct i40e_aq_set_phy_config config;
4320 int requested_ls = 0;
4321 enum i40e_status_code aq_error = 0;
4325 ** FW doesn't support changing advertised speed
4326 ** for 40G devices; speed is always 40G.
4328 if (i40e_is_40G_device(hw->device_id))
4331 /* Read in new mode */
4332 requested_ls = pf->advertised_speed;
4333 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4334 if ((error) || (req->newptr == NULL))
4336 if (requested_ls < 1 || requested_ls > 7) {
4338 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4342 /* Exit if no change */
4343 if (pf->advertised_speed == requested_ls)
4346 /* Get current capability information */
4347 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4349 device_printf(dev, "%s: Error getting phy capabilities %d,"
4350 " aq error: %d\n", __func__, aq_error,
4351 hw->aq.asq_last_status);
4355 /* Prepare new config */
4356 bzero(&config, sizeof(config));
4357 config.phy_type = abilities.phy_type;
4358 config.abilities = abilities.abilities
4359 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4360 config.eee_capability = abilities.eee_capability;
4361 config.eeer = abilities.eeer_val;
4362 config.low_power_ctrl = abilities.d3_lpan;
4363 /* Translate into aq cmd link_speed */
4364 if (requested_ls & 0x4)
4365 config.link_speed |= I40E_LINK_SPEED_10GB;
4366 if (requested_ls & 0x2)
4367 config.link_speed |= I40E_LINK_SPEED_1GB;
4368 if (requested_ls & 0x1)
4369 config.link_speed |= I40E_LINK_SPEED_100MB;
4371 /* Do aq command & restart link */
4372 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4374 device_printf(dev, "%s: Error setting new phy config %d,"
4375 " aq error: %d\n", __func__, aq_error,
4376 hw->aq.asq_last_status);
4380 pf->advertised_speed = requested_ls;
4381 ixl_update_link_status(pf);
4386 ** Get the width and transaction speed of
4387 ** the bus this adapter is plugged into.
/*
** Reads the PCIe Link Status register and decodes negotiated lane
** width and link speed into hw->bus, printing the result. Warns
** when the slot's bandwidth may be insufficient.
** NOTE(review): the warning condition uses '<= x8' for width, which
** also matches a full-width x8 link — verify '<' was not intended.
*/
4390 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4396 /* Get the PCI Express Capabilities offset */
4397 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4399 /* ...and read the Link Status Register */
4400 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4402 switch (link & I40E_PCI_LINK_WIDTH) {
4403 case I40E_PCI_LINK_WIDTH_1:
4404 hw->bus.width = i40e_bus_width_pcie_x1;
4406 case I40E_PCI_LINK_WIDTH_2:
4407 hw->bus.width = i40e_bus_width_pcie_x2;
4409 case I40E_PCI_LINK_WIDTH_4:
4410 hw->bus.width = i40e_bus_width_pcie_x4;
4412 case I40E_PCI_LINK_WIDTH_8:
4413 hw->bus.width = i40e_bus_width_pcie_x8;
4416 hw->bus.width = i40e_bus_width_unknown;
4420 switch (link & I40E_PCI_LINK_SPEED) {
4421 case I40E_PCI_LINK_SPEED_2500:
4422 hw->bus.speed = i40e_bus_speed_2500;
4424 case I40E_PCI_LINK_SPEED_5000:
4425 hw->bus.speed = i40e_bus_speed_5000;
4427 case I40E_PCI_LINK_SPEED_8000:
4428 hw->bus.speed = i40e_bus_speed_8000;
4431 hw->bus.speed = i40e_bus_speed_unknown;
4436 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4437 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4438 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4439 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4440 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4441 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4442 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4445 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4446 (hw->bus.speed < i40e_bus_speed_8000)) {
4447 device_printf(dev, "PCI-Express bandwidth available"
4448 " for this device\n is not sufficient for"
4449 " normal operation.\n");
4450 device_printf(dev, "For expected performance a x8 "
4451 "PCIE Gen3 slot is required.\n");
/*
** ixl_sysctl_link_status - read-only sysctl: queries link info from
** firmware and formats PHY type / speed / link / AN / ext info into
** a local buffer returned to userland.
*/
4459 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4461 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4462 struct i40e_hw *hw = &pf->hw;
4463 struct i40e_link_status link_status;
4466 enum i40e_status_code aq_error = 0;
4468 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4470 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4475 "PHY Type : %#04x\n"
4477 "Link info: %#04x\n"
4480 link_status.phy_type, link_status.link_speed,
4481 link_status.link_info, link_status.an_info,
4482 link_status.ext_info);
4484 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/*
** ixl_sysctl_phy_abilities - read-only sysctl: queries PHY
** capabilities from firmware and formats type / speed / abilities /
** EEE / EEER / low-power fields into a buffer for userland.
*/
4488 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4490 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4491 struct i40e_hw *hw = &pf->hw;
4492 struct i40e_aq_get_phy_abilities_resp abilities_resp;
4495 enum i40e_status_code aq_error = 0;
4497 // TODO: Print out list of qualified modules as well?
4498 aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4500 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4505 "PHY Type : %#010x\n"
4507 "Abilities: %#04x\n"
4509 "EEER reg : %#010x\n"
4511 abilities_resp.phy_type, abilities_resp.link_speed,
4512 abilities_resp.abilities, abilities_resp.eee_capability,
4513 abilities_resp.eeer_val, abilities_resp.d3_lpan);
4515 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/*
** ixl_sysctl_sw_filter_list - read-only sysctl: walks the VSI's MAC
** filter list twice, first to count entries, then to format each as
** "MAC, vlan, flags" into a malloc'd buffer handed to userland.
** NOTE(review): malloc is M_NOWAIT — the NULL check on 'buf' is
** missing from this extraction; confirm it exists in full source.
*/
4519 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4521 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4522 struct ixl_vsi *vsi = &pf->vsi;
4523 struct ixl_mac_filter *f;
4528 int ftl_counter = 0;
/* First pass: count filters to size the output buffer */
4532 SLIST_FOREACH(f, &vsi->ftl, next) {
4537 sysctl_handle_string(oidp, "(none)", 6, req);
4541 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4542 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4544 sprintf(buf_i++, "\n");
4545 SLIST_FOREACH(f, &vsi->ftl, next) {
4547 MAC_FORMAT ", vlan %4d, flags %#06x",
4548 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4550 /* don't print '\n' for last entry */
4551 if (++ftl_counter != ftl_len) {
4552 sprintf(buf_i, "\n");
4557 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4559 printf("sysctl error: %d\n", error);
4560 free(buf, M_DEVBUF);
/* Max switch resource allocation entries requested from firmware */
4564 #define IXL_SW_RES_SIZE 0x14
/*
** ixl_sysctl_hw_res_info - read-only sysctl: fetches the switch
** resource allocation table from firmware and renders it as an
** sbuf-formatted table (type / guaranteed / total / used /
** unallocated) returned through sysctl.
*/
4566 ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
4568 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4569 struct i40e_hw *hw = &pf->hw;
4570 device_t dev = pf->dev;
4575 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4577 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4579 device_printf(dev, "Could not allocate sbuf for output.\n");
4583 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4588 device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4589 __func__, error, hw->aq.asq_last_status);
4593 device_printf(dev, "Num_entries: %d\n", num_entries);
4595 sbuf_cat(buf, "\n");
4597 "Type | Guaranteed | Total | Used | Un-allocated\n"
4598 " | (this) | (all) | (this) | (all) \n");
4599 for (int i = 0; i < num_entries; i++) {
4601 "%#4x | %10d %5d %6d %12d",
4602 resp[i].resource_type,
4606 resp[i].total_unalloced);
4607 if (i < num_entries - 1)
4608 sbuf_cat(buf, "\n");
4611 error = sbuf_finish(buf);
4613 device_printf(dev, "Error finishing sbuf: %d\n", error);
4618 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4620 device_printf(dev, "sysctl error: %d\n", error);
4627 ** Dump TX desc given index.
4628 ** Doesn't work; don't use.
4629 ** TODO: Also needs a queue index input!
4632 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4634 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4635 device_t dev = pf->dev;
4641 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4643 device_printf(dev, "Could not allocate sbuf for output.\n");
4648 error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4651 if (req->newptr == NULL)
4652 return (EIO); // fix
4653 if (desc_idx > 1024) { // fix
4655 "Invalid descriptor index, needs to be < 1024\n"); // fix
4659 // Don't use this sysctl yet
4663 sbuf_cat(buf, "\n");
4666 struct ixl_queue *que = pf->vsi.queues;
4667 struct tx_ring *txr = &(que[1].txr);
4668 struct i40e_tx_desc *txd = &txr->base[desc_idx];
4670 sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4671 sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4672 sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4674 error = sbuf_finish(buf);
4676 device_printf(dev, "Error finishing sbuf: %d\n", error);
4681 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4683 device_printf(dev, "sysctl error: %d\n", error);