1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
47 /*********************************************************************
49 *********************************************************************/
/*
 * Driver version string; reported in the probe description
 * (see ixl_probe) alongside the branding string.
 * NOTE(review): removed stray line-number residue ("50 ") left by a
 * text-extraction pass; the declaration itself is unchanged.
 */
char ixl_driver_version[] = "1.4.3";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixl_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
62 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
73 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
74 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
76 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
77 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
78 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
80 /* required last entry */
84 /*********************************************************************
85 * Table of branding strings
86 *********************************************************************/
/*
 * Branding strings, indexed by the last field of each
 * ixl_vendor_info_array entry; used by ixl_probe to build the
 * device description.
 * NOTE(review): restored the elided closing "};".
 */
static char *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
93 /*********************************************************************
95 *********************************************************************/
/*
 * Forward declarations for all file-local driver entry points and
 * helpers defined later in this file.
 *
 * NOTE(review): this span has lines elided by an extraction pass (the
 * embedded original numbering skips): the continuation lines carrying
 * the trailing "u64 *, u64 *);" arguments of ixl_stat_update48/32, the
 * "#endif" closing each IXL_DEBUG_SYSCTL group, and the "#ifdef PCI_IOV"
 * /"#endif" guards around the SR-IOV prototypes.  Restore from upstream
 * sys/dev/ixl/if_ixl.c before compiling.
 */
96 static int ixl_probe(device_t);
97 static int ixl_attach(device_t);
98 static int ixl_detach(device_t);
99 static int ixl_shutdown(device_t);
100 static int ixl_get_hw_capabilities(struct ixl_pf *);
101 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
102 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void ixl_init(void *);
104 static void ixl_init_locked(struct ixl_pf *);
105 static void ixl_stop(struct ixl_pf *);
106 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
107 static int ixl_media_change(struct ifnet *);
108 static void ixl_update_link_status(struct ixl_pf *);
109 static int ixl_allocate_pci_resources(struct ixl_pf *);
110 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
111 static int ixl_setup_stations(struct ixl_pf *);
112 static int ixl_switch_config(struct ixl_pf *);
113 static int ixl_initialize_vsi(struct ixl_vsi *);
114 static int ixl_assign_vsi_msix(struct ixl_pf *);
115 static int ixl_assign_vsi_legacy(struct ixl_pf *);
116 static int ixl_init_msix(struct ixl_pf *);
117 static void ixl_configure_msix(struct ixl_pf *);
118 static void ixl_configure_itr(struct ixl_pf *);
119 static void ixl_configure_legacy(struct ixl_pf *);
120 static void ixl_free_pci_resources(struct ixl_pf *);
121 static void ixl_local_timer(void *);
122 static int ixl_setup_interface(device_t, struct ixl_vsi *);
123 static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
124 static void ixl_config_rss(struct ixl_vsi *);
125 static void ixl_set_queue_rx_itr(struct ixl_queue *);
126 static void ixl_set_queue_tx_itr(struct ixl_queue *);
127 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
/* Ring/interrupt enable-disable helpers */
129 static int ixl_enable_rings(struct ixl_vsi *);
130 static int ixl_disable_rings(struct ixl_vsi *);
131 static void ixl_enable_intr(struct ixl_vsi *);
132 static void ixl_disable_intr(struct ixl_vsi *);
133 static void ixl_disable_rings_intr(struct ixl_vsi *);
135 static void ixl_enable_adminq(struct i40e_hw *);
136 static void ixl_disable_adminq(struct i40e_hw *);
137 static void ixl_enable_queue(struct i40e_hw *, int);
138 static void ixl_disable_queue(struct i40e_hw *, int);
139 static void ixl_enable_legacy(struct i40e_hw *);
140 static void ixl_disable_legacy(struct i40e_hw *);
/* Multicast / VLAN / MAC filter management */
142 static void ixl_set_promisc(struct ixl_vsi *);
143 static void ixl_add_multi(struct ixl_vsi *);
144 static void ixl_del_multi(struct ixl_vsi *);
145 static void ixl_register_vlan(void *, struct ifnet *, u16);
146 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
147 static void ixl_setup_vlan_filters(struct ixl_vsi *);
149 static void ixl_init_filters(struct ixl_vsi *);
150 static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
151 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
152 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
153 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
154 static void ixl_del_hw_filters(struct ixl_vsi *, int);
155 static struct ixl_mac_filter *
156 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
157 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
158 static void ixl_free_mac_filters(struct ixl_vsi *vsi);
161 /* Sysctl debug interface */
162 #ifdef IXL_DEBUG_SYSCTL
163 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
164 static void ixl_print_debug_info(struct ixl_pf *);
/* NOTE(review): matching "#endif" for IXL_DEBUG_SYSCTL elided here. */
167 /* The MSI/X Interrupt handlers */
168 static void ixl_intr(void *);
169 static void ixl_msix_que(void *);
170 static void ixl_msix_adminq(void *);
171 static void ixl_handle_mdd_event(struct ixl_pf *);
173 /* Deferred interrupt tasklets */
174 static void ixl_do_adminq(void *, int);
176 /* Sysctl handlers */
177 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
178 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
179 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
180 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
/* Statistics setup/update helpers */
183 static void ixl_add_hw_stats(struct ixl_pf *);
184 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
185 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
186 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
187 struct sysctl_oid_list *,
188 struct i40e_eth_stats *);
189 static void ixl_update_stats_counters(struct ixl_pf *);
190 static void ixl_update_eth_stats(struct ixl_vsi *);
191 static void ixl_update_vsi_stats(struct ixl_vsi *);
192 static void ixl_pf_reset_stats(struct ixl_pf *);
193 static void ixl_vsi_reset_stats(struct ixl_vsi *);
/* NOTE(review): trailing "u64 *, u64 *);" continuation lines of the two
   prototypes below were elided by extraction. */
194 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
196 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
199 #ifdef IXL_DEBUG_SYSCTL
200 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
201 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
202 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
203 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
204 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
/* NOTE(review): "#endif" and the "#ifdef PCI_IOV" guard opening the
   SR-IOV prototypes below were elided. */
208 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
210 static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
211 static void ixl_uninit_iov(device_t dev);
212 static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
214 static void ixl_handle_vf_msg(struct ixl_pf *,
215 struct i40e_arq_event_info *);
216 static void ixl_handle_vflr(void *arg, int pending);
218 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
219 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
222 /*********************************************************************
223 * FreeBSD Device Interface Entry Points
224 *********************************************************************/
226 static device_method_t ixl_methods[] = {
227 /* Device interface */
228 DEVMETHOD(device_probe, ixl_probe),
229 DEVMETHOD(device_attach, ixl_attach),
230 DEVMETHOD(device_detach, ixl_detach),
231 DEVMETHOD(device_shutdown, ixl_shutdown),
233 DEVMETHOD(pci_init_iov, ixl_init_iov),
234 DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
235 DEVMETHOD(pci_add_vf, ixl_add_vf),
240 static driver_t ixl_driver = {
241 "ixl", ixl_methods, sizeof(struct ixl_pf),
244 devclass_t ixl_devclass;
245 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
247 MODULE_DEPEND(ixl, pci, 1, 1, 1);
248 MODULE_DEPEND(ixl, ether, 1, 1, 1);
250 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
251 #endif /* DEV_NETMAP */
/*
 * Global state and loader-tunable parameters.  Each tunable is exposed
 * both as a boot-time TUNABLE_INT and a read-only-tunable sysctl under
 * hw.ixl.  NOTE(review): the surrounding "/" comment delimiters, the
 * "#ifdef IXL_FDIR" guard around the flow-director tunables, and the
 * "#ifdef DEV_NETMAP" opener matching the "#endif" at the bottom were
 * elided by an extraction pass — restore before compiling.
 */
254 ** Global reset mutex
256 static struct mtx ixl_reset_mtx;
259 ** TUNEABLE PARAMETERS:
262 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
263 "IXL driver parameters");
266 * MSIX should be the default for best performance,
267 * but this allows it to be forced off for testing.
269 static int ixl_enable_msix = 1;
270 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
271 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
272 "Enable MSI-X interrupts");
275 ** Number of descriptors per ring:
276 ** - TX and RX are the same size
278 static int ixl_ringsz = DEFAULT_RING;
279 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
280 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
281 &ixl_ringsz, 0, "Descriptor Ring Size");
/* Queue count: 0 means auto-size from CPUs and MSI-X vectors. */
284 ** This can be set manually, if left as 0 the
285 ** number of queues will be calculated based
286 ** on cpus and msix vectors available.
288 int ixl_max_queues = 0;
289 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
290 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
291 &ixl_max_queues, 0, "Number of Queues");
294 ** Controls for Interrupt Throttling
295 ** - true/false for dynamic adjustment
296 ** - default values for static ITR
298 int ixl_dynamic_rx_itr = 0;
299 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
300 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
301 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
303 int ixl_dynamic_tx_itr = 0;
304 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
305 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
306 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
308 int ixl_rx_itr = IXL_ITR_8K;
309 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
310 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
311 &ixl_rx_itr, 0, "RX Interrupt Rate");
313 int ixl_tx_itr = IXL_ITR_4K;
314 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
315 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
316 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* NOTE(review): the two tunables below are flow-director controls;
   upstream wraps them in "#ifdef IXL_FDIR", elided here. */
319 static int ixl_enable_fdir = 1;
320 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
321 /* Rate at which we sample */
322 int ixl_atr_rate = 20;
323 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
/* NOTE(review): "#ifdef DEV_NETMAP" opener for the include below was
   elided; only its "#endif" survived. */
327 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
328 #include <dev/netmap/if_ixl_netmap.h>
329 #endif /* DEV_NETMAP */
/*
 * Human-readable names for the i40e flow-control modes, indexed by the
 * hardware fc mode value (used by the "fc" sysctl handler).
 * NOTE(review): the six string entries and closing "};" were elided by
 * an extraction pass (the original numbering skips lines 332-339);
 * restored here from the upstream table — confirm against
 * sys/dev/ixl/if_ixl.c.
 */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
/* Malloc type tag for this driver's kernel allocations. */
340 static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/* Ethernet broadcast address, used when programming broadcast filters. */
342 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
343 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
345 /*********************************************************************
346 * Device identification routine
348 * ixl_probe determines if the driver should be loaded on
349 * the hardware based on PCI vendor/device id of the device.
351 * return BUS_PROBE_DEFAULT on success, positive on failure
352 *********************************************************************/
355 ixl_probe(device_t dev)
357 ixl_vendor_info_t *ent;
359 u16 pci_vendor_id, pci_device_id;
360 u16 pci_subvendor_id, pci_subdevice_id;
361 char device_name[256];
362 static bool lock_init = FALSE;
364 INIT_DEBUGOUT("ixl_probe: begin");
366 pci_vendor_id = pci_get_vendor(dev);
367 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
370 pci_device_id = pci_get_device(dev);
371 pci_subvendor_id = pci_get_subvendor(dev);
372 pci_subdevice_id = pci_get_subdevice(dev);
374 ent = ixl_vendor_info_array;
375 while (ent->vendor_id != 0) {
376 if ((pci_vendor_id == ent->vendor_id) &&
377 (pci_device_id == ent->device_id) &&
379 ((pci_subvendor_id == ent->subvendor_id) ||
380 (ent->subvendor_id == 0)) &&
382 ((pci_subdevice_id == ent->subdevice_id) ||
383 (ent->subdevice_id == 0))) {
384 sprintf(device_name, "%s, Version - %s",
385 ixl_strings[ent->index],
387 device_set_desc_copy(dev, device_name);
388 /* One shot mutex init */
389 if (lock_init == FALSE) {
391 mtx_init(&ixl_reset_mtx,
393 "IXL RESET Lock", MTX_DEF);
395 return (BUS_PROBE_DEFAULT);
402 /*********************************************************************
403 * Device initialization routine
405 * The attach entry point is called when the driver is being loaded.
406 * This routine identifies the type of hardware, allocates all resources
407 * and initializes the hardware.
409 * return 0 on success, positive on failure
410 *********************************************************************/
/*
 * Device attach: identifies the hardware, maps BAR0, resets the PF,
 * brings up the admin queue and HMC, reads the MAC address, creates the
 * VSI/queues/interrupts, registers the ifnet and VLAN event handlers,
 * and (when compiled in) sets up SR-IOV schemas and netmap.
 *
 * NOTE(review): this span has many lines elided by an extraction pass
 * (return type, local declarations for pf/hw/vsi/error/bus, opening
 * brace, "goto err_*" error paths, the err_late/err_mac_hmc/err_get_cap
 * /err_out labels, and the "#ifdef RSS"/"#ifdef PCI_IOV"/"#ifdef
 * DEV_NETMAP" guards).  Do not compile as-is; restore from upstream
 * sys/dev/ixl/if_ixl.c.  Only comments have been added below; all
 * surviving code lines are untouched.
 */
413 ixl_attach(device_t dev)
421 nvlist_t *pf_schema, *vf_schema;
425 INIT_DEBUGOUT("ixl_attach: begin");
427 /* Allocate, clear, and link in our primary soft structure */
428 pf = device_get_softc(dev);
429 pf->dev = pf->osdep.dev = dev;
433 ** Note this assumes we have a single embedded VSI,
434 ** this could be enhanced later to allocate multiple
440 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
442 /* Set up the timer callout */
443 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Per-device sysctl tree: flow control, speed, firmware, ITR knobs */
446 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
449 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
451 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
454 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
456 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
459 pf, 0, ixl_current_speed, "A", "Current Port Speed");
461 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
462 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
464 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
466 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
467 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
468 OID_AUTO, "rx_itr", CTLFLAG_RW,
469 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
471 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
472 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
473 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
474 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
476 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
477 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
478 OID_AUTO, "tx_itr", CTLFLAG_RW,
479 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
481 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
482 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
483 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
484 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
486 #ifdef IXL_DEBUG_SYSCTL
487 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
490 ixl_debug_info, "I", "Debug Information");
492 /* Debug shared-code message level */
493 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
494 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 OID_AUTO, "debug_mask", CTLFLAG_RW,
496 &pf->hw.debug_mask, 0, "Debug Message Level");
498 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
499 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
500 OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
501 0, "PF/VF Virtual Channel debug level");
503 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
504 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
505 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
506 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
508 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
509 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
510 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
511 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
513 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
514 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
515 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
516 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
518 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
519 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
520 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
521 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
523 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
524 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
525 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
526 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
529 /* Save off the PCI information */
530 hw->vendor_id = pci_get_vendor(dev);
531 hw->device_id = pci_get_device(dev);
532 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
533 hw->subsystem_vendor_id =
534 pci_read_config(dev, PCIR_SUBVEND_0, 2);
535 hw->subsystem_device_id =
536 pci_read_config(dev, PCIR_SUBDEV_0, 2);
538 hw->bus.device = pci_get_slot(dev);
539 hw->bus.func = pci_get_function(dev);
541 pf->vc_debug_lvl = 1;
543 /* Do PCI setup - map BAR0, etc */
544 if (ixl_allocate_pci_resources(pf)) {
545 device_printf(dev, "Allocation of PCI resources failed\n");
/* NOTE(review): error path ("error = ENXIO; goto err_out;") elided. */
550 /* Establish a clean starting point */
552 error = i40e_pf_reset(hw);
554 device_printf(dev,"PF reset failure %x\n", error);
559 /* Set admin queue parameters */
560 hw->aq.num_arq_entries = IXL_AQ_LEN;
561 hw->aq.num_asq_entries = IXL_AQ_LEN;
562 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
563 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
565 /* Initialize the shared code */
566 error = i40e_init_shared_code(hw);
568 device_printf(dev,"Unable to initialize the shared code\n");
573 /* Set up the admin queue */
574 error = i40e_init_adminq(hw);
576 device_printf(dev, "The driver for the device stopped "
577 "because the NVM image is newer than expected.\n"
578 "You must install the most recent version of "
579 " the network driver.\n");
582 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
/* Warn when firmware API version is newer/older than the driver expects */
584 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
585 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
586 device_printf(dev, "The driver for the device detected "
587 "a newer version of the NVM image than expected.\n"
588 "Please install the most recent version of the network driver.\n");
589 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
590 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
591 device_printf(dev, "The driver for the device detected "
592 "an older version of the NVM image than expected.\n"
593 "Please update the NVM image.\n");
596 i40e_clear_pxe_mode(hw);
598 /* Get capabilities from the device */
599 error = ixl_get_hw_capabilities(pf);
601 device_printf(dev, "HW capabilities failure!\n");
605 /* Set up host memory cache */
606 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
607 hw->func_caps.num_rx_qp, 0, 0);
609 device_printf(dev, "init_lan_hmc failed: %d\n", error);
613 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
615 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
619 /* Disable LLDP from the firmware */
620 i40e_aq_stop_lldp(hw, TRUE, NULL);
622 i40e_get_mac_addr(hw, hw->mac.addr);
623 error = i40e_validate_mac_addr(hw->mac.addr);
625 device_printf(dev, "validate_mac_addr failed: %d\n", error);
628 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
629 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
631 /* Set up VSI and queues */
632 if (ixl_setup_stations(pf) != 0) {
633 device_printf(dev, "setup stations failed!\n");
638 /* Initialize mac filter list for VSI */
639 SLIST_INIT(&vsi->ftl);
641 /* Set up interrupt routing here */
643 error = ixl_assign_vsi_msix(pf);
645 error = ixl_assign_vsi_legacy(pf);
/* Older firmware (pre-4.33) needs an explicit link restart */
649 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
650 (hw->aq.fw_maj_ver < 4)) {
652 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
654 device_printf(dev, "link restart failed, aq_err=%d\n",
655 pf->hw.aq.asq_last_status);
658 /* Determine link state */
659 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
660 i40e_get_link_status(hw, &pf->link_up);
662 /* Setup OS specific network interface */
663 if (ixl_setup_interface(dev, vsi) != 0) {
664 device_printf(dev, "interface setup failed!\n");
669 error = ixl_switch_config(pf);
671 device_printf(dev, "Initial switch config failed: %d\n", error);
675 /* Limit phy interrupts to link and modules failure */
676 error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN |
677 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
679 device_printf(dev, "set phy mask failed: %d\n", error);
681 /* Get the bus configuration and set the shared code */
682 bus = ixl_get_bus_info(hw, dev);
683 i40e_set_pci_config_data(hw, bus);
685 /* Initialize statistics */
686 ixl_pf_reset_stats(pf);
687 ixl_update_stats_counters(pf);
688 ixl_add_hw_stats(pf);
690 /* Register for VLAN events */
691 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
692 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
693 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
694 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
697 /* SR-IOV is only supported when MSI-X is in use. */
/* NOTE(review): the "#ifdef PCI_IOV" guard and msix check around this
   SR-IOV schema setup were elided. */
699 pf_schema = pci_iov_schema_alloc_node();
700 vf_schema = pci_iov_schema_alloc_node();
701 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
702 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
703 IOV_SCHEMA_HASDEFAULT, TRUE);
704 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
705 IOV_SCHEMA_HASDEFAULT, FALSE);
706 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
707 IOV_SCHEMA_HASDEFAULT, FALSE);
709 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
712 "Failed to initialize SR-IOV (error=%d)\n",
718 ixl_netmap_attach(vsi);
719 #endif /* DEV_NETMAP */
720 INIT_DEBUGOUT("ixl_attach: end");
/* NOTE(review): "return (0);" and the err_late/err_mac_hmc/err_get_cap
   /err_out labels interleaving the cleanup below were elided. */
724 if (vsi->ifp != NULL)
727 i40e_shutdown_lan_hmc(hw);
729 i40e_shutdown_adminq(hw);
731 ixl_free_pci_resources(pf);
733 IXL_PF_LOCK_DESTROY(pf);
737 /*********************************************************************
738 * Device removal routine
740 * The detach entry point is called when the driver is being removed.
741 * This routine stops the adapter and deallocates all the resources
742 * that were allocated for driver operation.
744 * return 0 on success, positive on failure
745 *********************************************************************/
/*
 * Device detach: refuses to detach while VLANs or SR-IOV VFs are in
 * use, then tears down the ifnet, per-queue taskqueues, LAN HMC, admin
 * queue, VLAN event handlers, timer, netmap state, and PCI resources.
 *
 * NOTE(review): lines elided by an extraction pass include the return
 * type, opening brace, local error/status declarations, EBUSY returns
 * for the in-use checks, the ixl_stop() call under the IFF_DRV_RUNNING
 * branch, device_printf openers before the two failure messages, the
 * "#ifdef PCI_IOV"/"#ifdef DEV_NETMAP" guards, and "return (0); }".
 * Restore from upstream before compiling.  Only comments added below.
 */
748 ixl_detach(device_t dev)
750 struct ixl_pf *pf = device_get_softc(dev);
751 struct i40e_hw *hw = &pf->hw;
752 struct ixl_vsi *vsi = &pf->vsi;
753 struct ixl_queue *que = vsi->queues;
759 INIT_DEBUGOUT("ixl_detach: begin");
761 /* Make sure VLANS are not using driver */
762 if (vsi->ifp->if_vlantrunk != NULL) {
763 device_printf(dev,"Vlan in use, detach first\n");
768 error = pci_iov_detach(dev);
770 device_printf(dev, "SR-IOV in use; detach first.\n");
775 ether_ifdetach(vsi->ifp);
776 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Drain and free each queue's deferred-work taskqueue */
782 for (int i = 0; i < vsi->num_queues; i++, que++) {
784 taskqueue_drain(que->tq, &que->task);
785 taskqueue_drain(que->tq, &que->tx_task);
786 taskqueue_free(que->tq);
790 /* Shutdown LAN HMC */
791 status = i40e_shutdown_lan_hmc(hw);
794 "Shutdown LAN HMC failed with code %d\n", status);
796 /* Shutdown admin queue */
797 status = i40e_shutdown_adminq(hw);
800 "Shutdown Admin queue failed with code %d\n", status);
802 /* Unregister VLAN events */
803 if (vsi->vlan_attach != NULL)
804 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
805 if (vsi->vlan_detach != NULL)
806 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
808 callout_drain(&pf->timer);
810 netmap_detach(vsi->ifp);
811 #endif /* DEV_NETMAP */
812 ixl_free_pci_resources(pf);
813 bus_generic_detach(dev);
816 IXL_PF_LOCK_DESTROY(pf);
820 /*********************************************************************
822 * Shutdown entry point
824 **********************************************************************/
827 ixl_shutdown(device_t dev)
829 struct ixl_pf *pf = device_get_softc(dev);
837 /*********************************************************************
839 * Get the hardware capabilities
841 **********************************************************************/
844 ixl_get_hw_capabilities(struct ixl_pf *pf)
846 struct i40e_aqc_list_capabilities_element_resp *buf;
847 struct i40e_hw *hw = &pf->hw;
848 device_t dev = pf->dev;
853 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
855 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
856 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
857 device_printf(dev, "Unable to allocate cap memory\n");
861 /* This populates the hw struct */
862 error = i40e_aq_discover_capabilities(hw, buf, len,
863 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
865 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
867 /* retry once with a larger buffer */
871 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
872 device_printf(dev, "capability discovery failed: %d\n",
873 pf->hw.aq.asq_last_status);
877 /* Capture this PF's starting queue pair */
878 pf->qbase = hw->func_caps.base_queue;
881 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
882 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
883 hw->pf_id, hw->func_caps.num_vfs,
884 hw->func_caps.num_msix_vectors,
885 hw->func_caps.num_msix_vectors_vf,
886 hw->func_caps.fd_filters_guaranteed,
887 hw->func_caps.fd_filters_best_effort,
888 hw->func_caps.num_tx_qp,
889 hw->func_caps.num_rx_qp,
890 hw->func_caps.base_queue);
896 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
898 device_t dev = vsi->dev;
900 /* Enable/disable TXCSUM/TSO4 */
901 if (!(ifp->if_capenable & IFCAP_TXCSUM)
902 && !(ifp->if_capenable & IFCAP_TSO4)) {
903 if (mask & IFCAP_TXCSUM) {
904 ifp->if_capenable |= IFCAP_TXCSUM;
905 /* enable TXCSUM, restore TSO if previously enabled */
906 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
907 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
908 ifp->if_capenable |= IFCAP_TSO4;
911 else if (mask & IFCAP_TSO4) {
912 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
913 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
915 "TSO4 requires txcsum, enabling both...\n");
917 } else if((ifp->if_capenable & IFCAP_TXCSUM)
918 && !(ifp->if_capenable & IFCAP_TSO4)) {
919 if (mask & IFCAP_TXCSUM)
920 ifp->if_capenable &= ~IFCAP_TXCSUM;
921 else if (mask & IFCAP_TSO4)
922 ifp->if_capenable |= IFCAP_TSO4;
923 } else if((ifp->if_capenable & IFCAP_TXCSUM)
924 && (ifp->if_capenable & IFCAP_TSO4)) {
925 if (mask & IFCAP_TXCSUM) {
926 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
927 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
929 "TSO4 requires txcsum, disabling both...\n");
930 } else if (mask & IFCAP_TSO4)
931 ifp->if_capenable &= ~IFCAP_TSO4;
934 /* Enable/disable TXCSUM_IPV6/TSO6 */
935 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
936 && !(ifp->if_capenable & IFCAP_TSO6)) {
937 if (mask & IFCAP_TXCSUM_IPV6) {
938 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
939 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
940 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
941 ifp->if_capenable |= IFCAP_TSO6;
943 } else if (mask & IFCAP_TSO6) {
944 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
945 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
947 "TSO6 requires txcsum6, enabling both...\n");
949 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
950 && !(ifp->if_capenable & IFCAP_TSO6)) {
951 if (mask & IFCAP_TXCSUM_IPV6)
952 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
953 else if (mask & IFCAP_TSO6)
954 ifp->if_capenable |= IFCAP_TSO6;
955 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
956 && (ifp->if_capenable & IFCAP_TSO6)) {
957 if (mask & IFCAP_TXCSUM_IPV6) {
958 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
959 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
961 "TSO6 requires txcsum6, disabling both...\n");
962 } else if (mask & IFCAP_TSO6)
963 ifp->if_capenable &= ~IFCAP_TSO6;
967 /*********************************************************************
970 * ixl_ioctl is called when the user wants to configure the
973 * return 0 on success, positive on failure
974 **********************************************************************/
/*
 * ifnet ioctl entry point: handles address assignment (avoiding a full
 * re-init when possible), MTU changes, interface flag changes
 * (promisc/allmulti), multicast list updates, media get/set, and
 * capability toggles; everything else falls through to ether_ioctl().
 *
 * NOTE(review): this span has lines elided by an extraction pass — the
 * return type, the switch(command) statement, all "case SIOCxxx:"
 * labels and "break"s, the IXL_PF_LOCK/UNLOCK pairs, ixl_stop/
 * ixl_disable_intr calls inside some branches, and the closing
 * "return (error); }".  Restore from upstream before compiling.  Only
 * comments have been added below; surviving code lines are untouched.
 */
977 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
979 struct ixl_vsi *vsi = ifp->if_softc;
980 struct ixl_pf *pf = vsi->back;
981 struct ifreq *ifr = (struct ifreq *) data;
982 #if defined(INET) || defined(INET6)
983 struct ifaddr *ifa = (struct ifaddr *)data;
984 bool avoid_reset = FALSE;
/* SIOCSIFADDR: mark the interface up without a full reset when the
   address family permits it */
992 if (ifa->ifa_addr->sa_family == AF_INET)
996 if (ifa->ifa_addr->sa_family == AF_INET6)
999 #if defined(INET) || defined(INET6)
1001 ** Calling init results in link renegotiation,
1002 ** so we avoid doing it when possible.
1005 ifp->if_flags |= IFF_UP;
1006 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1009 if (!(ifp->if_flags & IFF_NOARP))
1010 arp_ifinit(ifp, ifa);
1013 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the hardware maximum frame size */
1017 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1018 if (ifr->ifr_mtu > IXL_MAX_FRAME -
1019 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1023 ifp->if_mtu = ifr->ifr_mtu;
1024 vsi->max_frame_size =
1025 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1026 + ETHER_VLAN_ENCAP_LEN;
1027 ixl_init_locked(pf);
/* SIOCSIFFLAGS: re-init or just update promisc/allmulti as needed */
1032 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1034 if (ifp->if_flags & IFF_UP) {
1035 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1036 if ((ifp->if_flags ^ pf->if_flags) &
1037 (IFF_PROMISC | IFF_ALLMULTI)) {
1038 ixl_set_promisc(vsi);
1041 ixl_init_locked(pf);
1043 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1045 pf->if_flags = ifp->if_flags;
/* SIOCADDMULTI: quiesce interrupts while reprogramming the MC list */
1049 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1050 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1052 ixl_disable_intr(vsi);
1054 ixl_enable_intr(vsi);
1059 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1060 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1062 ixl_disable_intr(vsi);
1064 ixl_enable_intr(vsi);
/* SIOCSIFMEDIA/SIOCGIFMEDIA: delegate to ifmedia */
1070 #ifdef IFM_ETH_XTYPE
1073 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1074 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* SIOCSIFCAP: toggle offload capabilities; csum/TSO coupling handled
   by ixl_cap_txcsum_tso() */
1078 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1079 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1081 ixl_cap_txcsum_tso(vsi, ifp, mask);
1083 if (mask & IFCAP_RXCSUM)
1084 ifp->if_capenable ^= IFCAP_RXCSUM;
1085 if (mask & IFCAP_RXCSUM_IPV6)
1086 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1087 if (mask & IFCAP_LRO)
1088 ifp->if_capenable ^= IFCAP_LRO;
1089 if (mask & IFCAP_VLAN_HWTAGGING)
1090 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1091 if (mask & IFCAP_VLAN_HWFILTER)
1092 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1093 if (mask & IFCAP_VLAN_HWTSO)
1094 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1095 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1097 ixl_init_locked(pf);
1100 VLAN_CAPABILITIES(ifp);
/* default: hand anything unrecognized to the generic ethernet ioctl */
1106 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1107 error = ether_ioctl(ifp, command, data);
1115 /*********************************************************************
1118 * This routine is used in two ways. It is used by the stack as
1119 * init entry point in network interface structure. It is also used
1120 * by the driver as a hw/sw initialization routine to get to a
1123 * return 0 on success, positive on failure
1124 **********************************************************************/
/*
 * ixl_init_locked - hw/sw (re)initialization for the PF and its VSI.
 * Refreshes the MAC filter (picking up a user-set LAA), programs TSO/
 * checksum offload assists, sets hardware filter control, configures
 * RSS, initializes the VSI rings, starts the local timer, sets up
 * MSI-X or legacy interrupt routing, enables rings/interrupts, and
 * finally marks the ifnet RUNNING.
 * Caller must hold pf->pf_mtx (asserted below).
 * NOTE(review): this listing is elided -- braces, returns and some
 * error-path lines are missing from this excerpt.
 */
1127 ixl_init_locked(struct ixl_pf *pf)
1129 struct i40e_hw *hw = &pf->hw;
1130 struct ixl_vsi *vsi = &pf->vsi;
1131 struct ifnet *ifp = vsi->ifp;
1132 device_t dev = pf->dev;
1133 struct i40e_filter_control_settings filter;
1134 u8 tmpaddr[ETHER_ADDR_LEN];
1137 mtx_assert(&pf->pf_mtx, MA_OWNED);
1138 INIT_DEBUGOUT("ixl_init: begin");
1141 /* Get the latest mac address... User might use a LAA */
1142 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1143 I40E_ETH_LENGTH_OF_ADDRESS);
/*
 * If the ifnet's LL address differs from what the hardware has and is
 * valid, swap the HW filter over to the new address and write it via
 * the admin queue (LAA-only write).
 */
1144 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1145 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
1146 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1147 bcopy(tmpaddr, hw->mac.addr,
1148 I40E_ETH_LENGTH_OF_ADDRESS);
1149 ret = i40e_aq_mac_address_write(hw,
1150 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1151 hw->mac.addr, NULL);
1153 device_printf(dev, "LLA address"
1154 "change failed!!\n");
1157 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1161 /* Set the various hardware offload abilities */
1162 ifp->if_hwassist = 0;
1163 if (ifp->if_capenable & IFCAP_TSO)
1164 ifp->if_hwassist |= CSUM_TSO;
1165 if (ifp->if_capenable & IFCAP_TXCSUM)
1166 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1167 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1168 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1170 /* Set up the device filtering */
1171 bzero(&filter, sizeof(filter));
1172 filter.enable_ethtype = TRUE;
1173 filter.enable_macvlan = TRUE;
1175 filter.enable_fdir = TRUE;
1177 if (i40e_set_filter_control(hw, &filter))
1178 device_printf(dev, "set_filter_control() failed\n");
1181 ixl_config_rss(vsi);
1184 ** Prepare the VSI: rings, hmc contexts, etc...
1186 if (ixl_initialize_vsi(vsi)) {
1187 device_printf(dev, "initialize vsi failed!!\n");
1191 /* Add protocol filters to list */
1192 ixl_init_filters(vsi);
1194 /* Setup vlan's if needed */
1195 ixl_setup_vlan_filters(vsi);
1197 /* Start the local timer */
1198 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1200 /* Set up MSI/X routing and the ITR settings */
1201 if (ixl_enable_msix) {
1202 ixl_configure_msix(pf);
1203 ixl_configure_itr(pf);
1205 ixl_configure_legacy(pf);
1207 ixl_enable_rings(vsi);
1209 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1211 ixl_reconfigure_filters(vsi);
1213 /* Set MTU in hardware*/
1214 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1217 device_printf(vsi->dev,
1218 "aq_set_mac_config in init error, code %d\n",
1221 /* And now turn on interrupts */
1222 ixl_enable_intr(vsi);
1224 /* Now inform the stack we're ready */
1225 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1226 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ixl_init - stack-facing init entry point; defers to ixl_init_locked().
 * NOTE(review): the function header and the lock acquire/release lines
 * are elided from this excerpt -- presumably pf->pf_mtx is taken around
 * the ixl_init_locked() call; verify against the full source.
 */
1234 struct ixl_pf *pf = arg;
1237 ixl_init_locked(pf);
1244 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixl_handle_que - deferred (taskqueue) per-queue service routine.
 * While the interface is RUNNING it drains RX, restarts TX if the
 * buf_ring has pending packets, and re-enqueues itself when more work
 * remains; finally it re-enables the queue interrupt in hardware.
 * NOTE(review): listing is elided -- locking around the TX restart is
 * not visible here.
 */
1248 ixl_handle_que(void *context, int pending)
1250 struct ixl_queue *que = context;
1251 struct ixl_vsi *vsi = que->vsi;
1252 struct i40e_hw *hw = vsi->hw;
1253 struct tx_ring *txr = &que->txr;
1254 struct ifnet *ifp = vsi->ifp;
1257 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1258 more = ixl_rxeof(que, IXL_RX_LIMIT);
1261 if (!drbr_empty(ifp, txr->br))
1262 ixl_mq_start_locked(ifp, txr);
1265 taskqueue_enqueue(que->tq, &que->task);
1270 /* Reenable this interrupt - hmmm */
1271 ixl_enable_queue(hw, que->me);
1276 /*********************************************************************
1278 * Legacy Interrupt Service routine
1280 **********************************************************************/
/*
 * ixl_intr - legacy (INTx/MSI single-vector) interrupt service routine.
 * Reads and acks ICR0, dispatches VFLR and AdminQ causes to the PF
 * taskqueue, services queue 0 RX/TX, then re-enables the interrupt
 * causes and queue 0.  NOTE(review): the function header and several
 * intermediate lines are elided from this excerpt.
 */
1284 struct ixl_pf *pf = arg;
1285 struct i40e_hw *hw = &pf->hw;
1286 struct ixl_vsi *vsi = &pf->vsi;
1287 struct ixl_queue *que = vsi->queues;
1288 struct ifnet *ifp = vsi->ifp;
1289 struct tx_ring *txr = &que->txr;
1290 u32 reg, icr0, mask;
1291 bool more_tx, more_rx;
1295 /* Protect against spurious interrupts */
1296 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1299 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* Ack/clear the PBA bit so the interrupt can fire again */
1301 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1302 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1303 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1305 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1308 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1309 taskqueue_enqueue(pf->tq, &pf->vflr_task);
1312 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1313 taskqueue_enqueue(pf->tq, &pf->adminq);
1317 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1320 more_tx = ixl_txeof(que);
1321 if (!drbr_empty(vsi->ifp, txr->br))
1325 /* re-enable other interrupt causes */
1326 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1328 /* And now the queues */
1329 reg = rd32(hw, I40E_QINT_RQCTL(0));
1330 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1331 wr32(hw, I40E_QINT_RQCTL(0), reg);
1333 reg = rd32(hw, I40E_QINT_TQCTL(0));
1334 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1335 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1336 wr32(hw, I40E_QINT_TQCTL(0), reg);
1338 ixl_enable_legacy(hw);
1344 /*********************************************************************
1346 * MSIX VSI Interrupt Service routine
1348 **********************************************************************/
/*
 * ixl_msix_que - per-queue MSI-X interrupt service routine.
 * Drains RX and TX completions, restarts TX if the buf_ring is
 * non-empty, updates the dynamic ITR values for the queue, defers
 * remaining work to the queue task, and re-enables the queue vector.
 * NOTE(review): listing is elided -- some lines are missing here.
 */
1350 ixl_msix_que(void *arg)
1352 struct ixl_queue *que = arg;
1353 struct ixl_vsi *vsi = que->vsi;
1354 struct i40e_hw *hw = vsi->hw;
1355 struct tx_ring *txr = &que->txr;
1356 bool more_tx, more_rx;
1358 /* Protect against spurious interrupts */
1359 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1364 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1367 more_tx = ixl_txeof(que);
1369 ** Make certain that if the stack
1370 ** has anything queued the task gets
1371 ** scheduled to handle it.
1373 if (!drbr_empty(vsi->ifp, txr->br))
1377 ixl_set_queue_rx_itr(que);
1378 ixl_set_queue_tx_itr(que);
1380 if (more_tx || more_rx)
1381 taskqueue_enqueue(que->tq, &que->task);
1383 ixl_enable_queue(hw, que->me);
1389 /*********************************************************************
1391 * MSIX Admin Queue Interrupt Service routine
1393 **********************************************************************/
/*
 * ixl_msix_adminq - MSI-X vector 0 (admin queue / "other causes") ISR.
 * Reads ICR0, masks off the causes it is about to handle (AdminQ,
 * malicious-driver detect, VFLR), acks the interrupt, and defers the
 * actual admin queue processing to the PF taskqueue.
 */
1395 ixl_msix_adminq(void *arg)
1397 struct ixl_pf *pf = arg;
1398 struct i40e_hw *hw = &pf->hw;
1403 reg = rd32(hw, I40E_PFINT_ICR0);
1404 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1406 /* Check on the cause */
1407 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1408 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1410 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1411 ixl_handle_mdd_event(pf);
1412 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1416 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1417 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1418 taskqueue_enqueue(pf->tq, &pf->vflr_task);
/* Ack/clear the PBA bit before handing off to the taskqueue */
1422 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1423 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1424 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1426 taskqueue_enqueue(pf->tq, &pf->adminq);
1430 /*********************************************************************
1432 * Media Ioctl callback
1434 * This routine is called whenever the user queries the status of
1435 * the interface using ifconfig.
1437 **********************************************************************/
/*
 * ixl_media_status - ifmedia "status" callback (ifconfig queries).
 * Forces a fresh link-info read from firmware, then translates the
 * PHY type reported by the hardware into the corresponding IFM_*
 * media word, and reports TX/RX pause (flow control) status.
 * NOTE(review): listing is elided -- break statements and some braces
 * between the case labels below are missing from this excerpt.
 */
1439 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1441 struct ixl_vsi *vsi = ifp->if_softc;
1442 struct ixl_pf *pf = vsi->back;
1443 struct i40e_hw *hw = &pf->hw;
1445 INIT_DEBUGOUT("ixl_media_status: begin");
1448 hw->phy.get_link_info = TRUE;
1449 i40e_get_link_status(hw, &pf->link_up);
1450 ixl_update_link_status(pf);
1452 ifmr->ifm_status = IFM_AVALID;
1453 ifmr->ifm_active = IFM_ETHER;
1460 ifmr->ifm_status |= IFM_ACTIVE;
1461 /* Hardware is always full-duplex */
1462 ifmr->ifm_active |= IFM_FDX;
1464 switch (hw->phy.link_info.phy_type) {
1466 case I40E_PHY_TYPE_100BASE_TX:
1467 ifmr->ifm_active |= IFM_100_TX;
1470 case I40E_PHY_TYPE_1000BASE_T:
1471 ifmr->ifm_active |= IFM_1000_T;
1473 case I40E_PHY_TYPE_1000BASE_SX:
1474 ifmr->ifm_active |= IFM_1000_SX;
1476 case I40E_PHY_TYPE_1000BASE_LX:
1477 ifmr->ifm_active |= IFM_1000_LX;
1480 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1481 ifmr->ifm_active |= IFM_10G_TWINAX;
1483 case I40E_PHY_TYPE_10GBASE_SR:
1484 ifmr->ifm_active |= IFM_10G_SR;
1486 case I40E_PHY_TYPE_10GBASE_LR:
1487 ifmr->ifm_active |= IFM_10G_LR;
1489 case I40E_PHY_TYPE_10GBASE_T:
1490 ifmr->ifm_active |= IFM_10G_T;
1493 case I40E_PHY_TYPE_40GBASE_CR4:
1494 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1495 ifmr->ifm_active |= IFM_40G_CR4;
1497 case I40E_PHY_TYPE_40GBASE_SR4:
1498 ifmr->ifm_active |= IFM_40G_SR4;
1500 case I40E_PHY_TYPE_40GBASE_LR4:
1501 ifmr->ifm_active |= IFM_40G_LR4;
/*
 * Backplane/KR PHY types: older kernels without IFM_ETH_XTYPE lack
 * the exact media words, so the closest existing ones are reported.
 */
1503 #ifndef IFM_ETH_XTYPE
1504 case I40E_PHY_TYPE_1000BASE_KX:
1505 ifmr->ifm_active |= IFM_1000_CX;
1507 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1508 case I40E_PHY_TYPE_10GBASE_CR1:
1509 ifmr->ifm_active |= IFM_10G_TWINAX;
1511 case I40E_PHY_TYPE_10GBASE_KX4:
1512 ifmr->ifm_active |= IFM_10G_CX4;
1514 case I40E_PHY_TYPE_10GBASE_KR:
1515 ifmr->ifm_active |= IFM_10G_SR;
1517 case I40E_PHY_TYPE_40GBASE_KR4:
1518 case I40E_PHY_TYPE_XLPPI:
1519 ifmr->ifm_active |= IFM_40G_SR4;
1522 case I40E_PHY_TYPE_1000BASE_KX:
1523 ifmr->ifm_active |= IFM_1000_KX;
1525 /* ERJ: What's the difference between these? */
1526 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1527 case I40E_PHY_TYPE_10GBASE_CR1:
1528 ifmr->ifm_active |= IFM_10G_CR1;
1530 case I40E_PHY_TYPE_10GBASE_KX4:
1531 ifmr->ifm_active |= IFM_10G_KX4;
1533 case I40E_PHY_TYPE_10GBASE_KR:
1534 ifmr->ifm_active |= IFM_10G_KR;
1536 case I40E_PHY_TYPE_20GBASE_KR2:
1537 ifmr->ifm_active |= IFM_20G_KR2;
1539 case I40E_PHY_TYPE_40GBASE_KR4:
1540 ifmr->ifm_active |= IFM_40G_KR4;
1542 case I40E_PHY_TYPE_XLPPI:
1543 ifmr->ifm_active |= IFM_40G_XLPPI;
1547 ifmr->ifm_active |= IFM_UNKNOWN;
1550 /* Report flow control status as well */
1551 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1552 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1553 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1554 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1561 /*********************************************************************
1563 * Media Ioctl callback
1565 * This routine is called when the user changes speed/duplex using
1566 ** media/mediaopt option with ifconfig.
1568 **********************************************************************/
/*
 * ixl_media_change - ifmedia "change" callback.
 * Rejects non-Ethernet media words; user-requested media changes are
 * not supported by this driver version, so it only logs a message.
 */
1570 ixl_media_change(struct ifnet * ifp)
1572 struct ixl_vsi *vsi = ifp->if_softc;
1573 struct ifmedia *ifm = &vsi->media;
1575 INIT_DEBUGOUT("ixl_media_change: begin");
1577 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1580 if_printf(ifp, "Media change is currently not supported.\n");
1588 ** ATR: Application Targeted Receive - creates a filter
1589 ** based on TX flow info that will keep the receive
1590 ** portion of the flow on the same queue. Based on the
1591 ** implementation this is only available for TCP connections
/*
 * ixl_atr - build a Flow Director filter-program descriptor on the TX
 * ring so receives for this TCP flow land on the same queue (ATR).
 * Samples all SYN/FIN packets, or every atr_rate-th packet otherwise;
 * a FIN triggers filter removal, anything else an add/update.
 * NOTE(review): listing is elided -- early returns and the atr_count
 * bookkeeping lines are missing from this excerpt.
 */
1594 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1596 struct ixl_vsi *vsi = que->vsi;
1597 struct tx_ring *txr = &que->txr;
1598 struct i40e_filter_program_desc *FDIR;
1602 /* check if ATR is enabled and sample rate */
1603 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1606 ** We sample all TCP SYN/FIN packets,
1607 ** or at the selected sample rate
1610 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1611 (txr->atr_count < txr->atr_rate))
1615 /* Get a descriptor to use */
1616 idx = txr->next_avail;
1617 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1618 if (++idx == que->num_desc)
1621 txr->next_avail = idx;
/* QW0: destination queue index, packet classifier type, dest VSI */
1623 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1624 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1626 ptype |= (etype == ETHERTYPE_IP) ?
1627 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1628 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1629 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1630 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1632 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1634 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1637 ** We use the TCP TH_FIN as a trigger to remove
1638 ** the filter, otherwise its an update.
1640 dtype |= (th->th_flags & TH_FIN) ?
1641 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1642 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1643 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1644 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1646 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1647 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1649 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1650 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
/* Descriptor fields are little-endian on the wire */
1652 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1653 FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * ixl_set_promisc - sync unicast/multicast promiscuous mode with the
 * ifnet flags.  Multicast promiscuity is also forced on when the
 * multicast address count reaches MAX_MULTICAST_ADDR.
 * NOTE(review): listing is elided -- the flag assignments inside the
 * branches and the mcnt increment are missing from this excerpt.
 */
1660 ixl_set_promisc(struct ixl_vsi *vsi)
1662 struct ifnet *ifp = vsi->ifp;
1663 struct i40e_hw *hw = vsi->hw;
1665 bool uni = FALSE, multi = FALSE;
1667 if (ifp->if_flags & IFF_ALLMULTI)
1669 else { /* Need to count the multicast addresses */
1670 struct ifmultiaddr *ifma;
1671 if_maddr_rlock(ifp);
1672 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1673 if (ifma->ifma_addr->sa_family != AF_LINK)
1675 if (mcnt == MAX_MULTICAST_ADDR)
1679 if_maddr_runlock(ifp);
1682 if (mcnt >= MAX_MULTICAST_ADDR)
1684 if (ifp->if_flags & IFF_PROMISC)
/* Push both settings to firmware via the admin queue */
1687 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1688 vsi->seid, uni, NULL);
1689 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1690 vsi->seid, multi, NULL);
1694 /*********************************************************************
1697 * Routines for multicast and vlan filter management.
1699 *********************************************************************/
/*
 * ixl_add_multi - program the hardware with the ifnet's multicast
 * address list.  If the list exceeds MAX_MULTICAST_ADDR, fall back to
 * multicast-promiscuous instead of per-address filters.
 * NOTE(review): listing is elided -- loop counters/continue lines are
 * missing from this excerpt.
 */
1701 ixl_add_multi(struct ixl_vsi *vsi)
1703 struct ifmultiaddr *ifma;
1704 struct ifnet *ifp = vsi->ifp;
1705 struct i40e_hw *hw = vsi->hw;
1706 int mcnt = 0, flags;
1708 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1710 if_maddr_rlock(ifp);
1712 ** First just get a count, to decide if we
1713 ** simply use multicast promiscuous.
1715 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1716 if (ifma->ifma_addr->sa_family != AF_LINK)
1720 if_maddr_runlock(ifp);
1722 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1723 /* delete existing MC filters */
1724 ixl_del_hw_filters(vsi, mcnt);
1725 i40e_aq_set_vsi_multicast_promiscuous(hw,
1726 vsi->seid, TRUE, NULL);
/* Second pass: add a software filter entry for each MC address */
1731 if_maddr_rlock(ifp);
1732 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1733 if (ifma->ifma_addr->sa_family != AF_LINK)
1735 ixl_add_mc_filter(vsi,
1736 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1739 if_maddr_runlock(ifp);
1741 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1742 ixl_add_hw_filters(vsi, flags, mcnt);
1745 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * ixl_del_multi - remove stale multicast filters: any MC entry in the
 * VSI filter list that no longer appears in the ifnet's multicast
 * address list is marked for deletion, then the deletions are pushed
 * to hardware.  NOTE(review): listing is elided -- the match flag
 * reset, break, and mcnt bookkeeping lines are missing here.
 */
1750 ixl_del_multi(struct ixl_vsi *vsi)
1752 struct ifnet *ifp = vsi->ifp;
1753 struct ifmultiaddr *ifma;
1754 struct ixl_mac_filter *f;
1758 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1760 /* Search for removed multicast addresses */
1761 if_maddr_rlock(ifp);
1762 SLIST_FOREACH(f, &vsi->ftl, next) {
1763 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1765 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1766 if (ifma->ifma_addr->sa_family != AF_LINK)
1768 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1769 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1774 if (match == FALSE) {
1775 f->flags |= IXL_FILTER_DEL;
1780 if_maddr_runlock(ifp);
1783 ixl_del_hw_filters(vsi, mcnt);
1787 /*********************************************************************
1790 * This routine checks for link status, updates statistics,
1791 * and runs the watchdog check.
1793 **********************************************************************/
/*
 * ixl_local_timer - periodic (1 Hz) callout.  Kicks the adminq task,
 * updates statistics, fires a software interrupt at queues with
 * outstanding work, and runs the TX watchdog: a queue whose busy
 * counter reaches IXL_MAX_TX_BUSY is declared hung; if every queue is
 * hung the whole interface is reinitialized.
 * Caller must hold pf->pf_mtx (asserted below).
 * NOTE(review): listing is elided -- the hung/early-exit control flow
 * lines are missing from this excerpt.
 */
1796 ixl_local_timer(void *arg)
1798 struct ixl_pf *pf = arg;
1799 struct i40e_hw *hw = &pf->hw;
1800 struct ixl_vsi *vsi = &pf->vsi;
1801 struct ixl_queue *que = vsi->queues;
1802 device_t dev = pf->dev;
1806 mtx_assert(&pf->pf_mtx, MA_OWNED);
1808 /* Fire off the adminq task */
1809 taskqueue_enqueue(pf->tq, &pf->adminq);
1812 ixl_update_stats_counters(pf);
1815 ** Check status of the queues
1817 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1818 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1820 for (int i = 0; i < vsi->num_queues; i++,que++) {
1821 /* Any queues with outstanding work get a sw irq */
1823 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1825 ** Each time txeof runs without cleaning, but there
1826 ** are uncleaned descriptors it increments busy. If
1827 ** we get to 5 we declare it hung.
1829 if (que->busy == IXL_QUEUE_HUNG) {
1831 /* Mark the queue as inactive */
1832 vsi->active_queues &= ~((u64)1 << que->me);
1835 /* Check if we've come back from hung */
1836 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1837 vsi->active_queues |= ((u64)1 << que->me);
1839 if (que->busy >= IXL_MAX_TX_BUSY) {
1841 device_printf(dev,"Warning queue %d "
1842 "appears to be hung!\n", i);
1844 que->busy = IXL_QUEUE_HUNG;
1848 /* Only reinit if all queues show hung */
1849 if (hung == vsi->num_queues)
1852 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1856 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1857 ixl_init_locked(pf);
1861 ** Note: this routine updates the OS on the link state
1862 ** the real check of the hardware only happens with
1863 ** a link interrupt.
/*
 * ixl_update_link_status - push the driver's cached link state to the
 * OS (if_link_state_change) and log transitions.  Only acts on state
 * changes; the actual hardware link check happens elsewhere (link
 * interrupt / i40e_get_link_status).  Warns when an NPAR-enabled
 * partition links below 10Gbps.
 * NOTE(review): listing is elided -- the pf->link_up test guarding the
 * up/down branches is missing from this excerpt.
 */
1866 ixl_update_link_status(struct ixl_pf *pf)
1868 struct ixl_vsi *vsi = &pf->vsi;
1869 struct i40e_hw *hw = &pf->hw;
1870 struct ifnet *ifp = vsi->ifp;
1871 device_t dev = pf->dev;
1874 if (vsi->link_active == FALSE) {
1875 pf->fc = hw->fc.current_mode;
1877 device_printf(dev,"Link is up %d Gbps %s,"
1878 " Flow Control: %s\n",
1880 I40E_LINK_SPEED_40GB)? 40:10),
1881 "Full Duplex", ixl_fc_string[pf->fc]);
1883 vsi->link_active = TRUE;
1885 ** Warn user if link speed on NPAR enabled
1886 ** partition is not at least 10GB
1888 if (hw->func_caps.npar_enable &&
1889 (hw->phy.link_info.link_speed ==
1890 I40E_LINK_SPEED_1GB ||
1891 hw->phy.link_info.link_speed ==
1892 I40E_LINK_SPEED_100MB))
1893 device_printf(dev, "The partition detected"
1894 "link speed that is less than 10Gbps\n");
1895 if_link_state_change(ifp, LINK_STATE_UP);
1897 } else { /* Link down */
1898 if (vsi->link_active == TRUE) {
1900 device_printf(dev,"Link is Down\n");
1901 if_link_state_change(ifp, LINK_STATE_DOWN);
1902 vsi->link_active = FALSE;
1909 /*********************************************************************
1911 * This routine disables all traffic on the adapter by issuing a
1912 * global reset on the MAC and deallocates TX/RX buffers.
1914 **********************************************************************/
/*
 * ixl_stop - quiesce the interface: disable interrupts and rings,
 * clear RUNNING/OACTIVE so the stack stops transmitting, and stop the
 * local timer.  Caller must hold pf->pf_mtx (asserted below).
 */
1917 ixl_stop(struct ixl_pf *pf)
1919 struct ixl_vsi *vsi = &pf->vsi;
1920 struct ifnet *ifp = vsi->ifp;
1922 mtx_assert(&pf->pf_mtx, MA_OWNED);
1924 INIT_DEBUGOUT("ixl_stop: begin\n");
1925 if (pf->num_vfs == 0)
1926 ixl_disable_intr(vsi);
1928 ixl_disable_rings_intr(vsi);
1929 ixl_disable_rings(vsi);
1931 /* Tell the stack that the interface is no longer active */
1932 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1934 /* Stop the local timer */
1935 callout_stop(&pf->timer);
1941 /*********************************************************************
1943 * Setup MSIX Interrupt resources and handlers for the VSI
1945 **********************************************************************/
/*
 * ixl_assign_vsi_legacy - allocate and wire up the single legacy/MSI
 * interrupt: one IRQ resource, one handler (ixl_intr), plus the queue
 * and admin taskqueues that the handler defers work to.
 * NOTE(review): listing is elided -- error returns and the rid setup
 * line are missing from this excerpt.
 */
1947 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1949 device_t dev = pf->dev;
1950 struct ixl_vsi *vsi = &pf->vsi;
1951 struct ixl_queue *que = vsi->queues;
1956 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1957 &rid, RF_SHAREABLE | RF_ACTIVE);
1958 if (pf->res == NULL) {
1959 device_printf(dev,"Unable to allocate"
1960 " bus resource: vsi legacy/msi interrupt\n");
1964 /* Set the handler function */
1965 error = bus_setup_intr(dev, pf->res,
1966 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1967 ixl_intr, pf, &pf->tag);
1970 device_printf(dev, "Failed to register legacy/msi handler");
1973 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
/* Per-queue deferred tasks share one fast taskqueue */
1974 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1975 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1976 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1977 taskqueue_thread_enqueue, &que->tq);
1978 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1979 device_get_nameunit(dev));
1980 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1983 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1986 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1987 taskqueue_thread_enqueue, &pf->tq);
1988 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1989 device_get_nameunit(dev));
1995 /*********************************************************************
1997 * Setup MSIX Interrupt resources and handlers for the VSI
1999 **********************************************************************/
/*
 * ixl_assign_vsi_msix - allocate and wire up MSI-X vectors: vector 0
 * for the admin queue (ixl_msix_adminq), then one vector per queue
 * (ixl_msix_que), binding each queue vector to a CPU (RSS bucket when
 * RSS is compiled in) and creating its deferred-work taskqueue.
 * NOTE(review): listing is elided -- error returns, rid computation,
 * and the #ifdef RSS markers are missing from this excerpt.
 */
2001 ixl_assign_vsi_msix(struct ixl_pf *pf)
2003 device_t dev = pf->dev;
2004 struct ixl_vsi *vsi = &pf->vsi;
2005 struct ixl_queue *que = vsi->queues;
2006 struct tx_ring *txr;
2007 int error, rid, vector = 0;
2009 /* Admin Que is vector 0*/
2011 pf->res = bus_alloc_resource_any(dev,
2012 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2014 device_printf(dev,"Unable to allocate"
2015 " bus resource: Adminq interrupt [%d]\n", rid);
2018 /* Set the adminq vector and handler */
2019 error = bus_setup_intr(dev, pf->res,
2020 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2021 ixl_msix_adminq, pf, &pf->tag);
2024 device_printf(dev, "Failed to register Admin que handler");
2027 bus_describe_intr(dev, pf->res, pf->tag, "aq");
2028 pf->admvec = vector;
2029 /* Tasklet for Admin Queue */
2030 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2033 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2036 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2037 taskqueue_thread_enqueue, &pf->tq);
2038 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2039 device_get_nameunit(pf->dev));
2042 /* Now set up the stations */
2043 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2047 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2048 RF_SHAREABLE | RF_ACTIVE);
2049 if (que->res == NULL) {
2050 device_printf(dev,"Unable to allocate"
2051 " bus resource: que interrupt [%d]\n", vector);
2054 /* Set the handler function */
2055 error = bus_setup_intr(dev, que->res,
2056 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2057 ixl_msix_que, que, &que->tag);
2060 device_printf(dev, "Failed to register que handler");
2063 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2064 /* Bind the vector to a CPU */
2066 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2068 bus_bind_intr(dev, que->res, cpu_id);
2070 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2071 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2072 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2073 taskqueue_thread_enqueue, &que->tq);
/* RSS build pins the taskqueue thread to the bucket's CPU */
2075 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
2076 cpu_id, "%s (bucket %d)",
2077 device_get_nameunit(dev), cpu_id);
2079 taskqueue_start_threads(&que->tq, 1, PI_NET,
2080 "%s que", device_get_nameunit(dev));
2089 * Allocate MSI/X vectors
/*
 * ixl_init_msix - negotiate the MSI-X vector count: map the MSI-X BAR,
 * size the queue count from CPU count (clamped by the ixl_max_queues
 * tunable and, when RSS is built in, the RSS bucket count), request
 * queues+1 vectors (one extra for the admin queue), and fall back to
 * MSI or legacy interrupts if MSI-X cannot be satisfied.
 * Returns the number of vectors obtained (0 => legacy setup).
 * NOTE(review): listing is elided -- returns, #ifdef markers and some
 * assignments are missing from this excerpt.
 */
2092 ixl_init_msix(struct ixl_pf *pf)
2094 device_t dev = pf->dev;
2095 int rid, want, vectors, queues, available;
2097 /* Override by tuneable */
2098 if (ixl_enable_msix == 0)
2102 ** When used in a virtualized environment
2103 ** PCI BUSMASTER capability may not be set
2104 ** so explicity set it here and rewrite
2105 ** the ENABLE in the MSIX control register
2106 ** at this point to cause the host to
2107 ** successfully initialize us.
2112 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2113 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2114 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2115 pci_find_cap(dev, PCIY_MSIX, &rid);
2116 rid += PCIR_MSIX_CTRL;
2117 msix_ctrl = pci_read_config(dev, rid, 2);
2118 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2119 pci_write_config(dev, rid, msix_ctrl, 2);
2122 /* First try MSI/X */
2123 rid = PCIR_BAR(IXL_BAR);
2124 pf->msix_mem = bus_alloc_resource_any(dev,
2125 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2126 if (!pf->msix_mem) {
2127 /* May not be enabled */
2128 device_printf(pf->dev,
2129 "Unable to map MSIX table \n");
2133 available = pci_msix_count(dev);
2134 if (available == 0) { /* system has msix disabled */
2135 bus_release_resource(dev, SYS_RES_MEMORY,
2137 pf->msix_mem = NULL;
2141 /* Figure out a reasonable auto config value */
2142 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2144 /* Override with hardcoded value if sane */
2145 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2146 queues = ixl_max_queues;
2149 /* If we're doing RSS, clamp at the number of RSS buckets */
2150 if (queues > rss_getnumbuckets())
2151 queues = rss_getnumbuckets();
2155 ** Want one vector (RX/TX pair) per queue
2156 ** plus an additional for the admin queue.
2159 if (want <= available) /* Have enough */
2162 device_printf(pf->dev,
2163 "MSIX Configuration Problem, "
2164 "%d vectors available but %d wanted!\n",
2166 return (0); /* Will go to Legacy setup */
2169 if (pci_alloc_msix(dev, &vectors) == 0) {
2170 device_printf(pf->dev,
2171 "Using MSIX interrupts with %d vectors\n", vectors);
2173 pf->vsi.num_queues = queues;
2176 * If we're doing RSS, the number of queues needs to
2177 * match the number of RSS buckets that are configured.
2179 * + If there's more queues than RSS buckets, we'll end
2180 * up with queues that get no traffic.
2182 * + If there's more RSS buckets than queues, we'll end
2183 * up having multiple RSS buckets map to the same queue,
2184 * so there'll be some contention.
2186 if (queues != rss_getnumbuckets()) {
2188 "%s: queues (%d) != RSS buckets (%d)"
2189 "; performance will be impacted.\n",
2190 __func__, queues, rss_getnumbuckets());
/* MSI-X failed or disabled: fall back to MSI, else legacy INTx */
2196 vectors = pci_msi_count(dev);
2197 pf->vsi.num_queues = 1;
2200 ixl_enable_msix = 0;
2201 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2202 device_printf(pf->dev,"Using an MSI interrupt\n");
2205 device_printf(pf->dev,"Using a Legacy interrupt\n");
2212 * Plumb MSI/X vectors
/*
 * ixl_configure_msix - program hardware interrupt routing for MSI-X:
 * vector 0 handles the admin queue / "other" causes; each RX queue
 * chains to its TX queue and each pair is routed to its own vector
 * (the last TX entry terminates the list with IXL_QUEUE_EOL).
 * NOTE(review): listing is elided -- the vector variable init is
 * missing from this excerpt.
 */
2215 ixl_configure_msix(struct ixl_pf *pf)
2217 struct i40e_hw *hw = &pf->hw;
2218 struct ixl_vsi *vsi = &pf->vsi;
2222 /* First set up the adminq - vector 0 */
2223 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2224 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2226 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2227 I40E_PFINT_ICR0_ENA_GRST_MASK |
2228 I40E_PFINT_ICR0_HMC_ERR_MASK |
2229 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2230 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2231 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2232 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2233 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2235 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2236 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2238 wr32(hw, I40E_PFINT_DYN_CTL0,
2239 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2240 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2242 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2244 /* Next configure the queues */
2245 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2246 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2247 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: route to this vector, chain to the paired TX queue */
2249 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2250 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2251 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2252 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2253 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2254 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: chain to the next RX queue, or EOL on the last one */
2256 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2257 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2258 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2259 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2260 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2261 if (i == (vsi->num_queues - 1))
2262 reg |= (IXL_QUEUE_EOL
2263 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2264 wr32(hw, I40E_QINT_TQCTL(i), reg);
2269 * Configure for MSI single vector operation
/*
 * ixl_configure_legacy - program hardware interrupt routing for the
 * single-vector (legacy INTx / MSI) case: enable the "other" causes
 * on ICR0, associate queue pair 0 with the one vector, and request
 * TX/RX queue 0 enable.
 */
2272 ixl_configure_legacy(struct ixl_pf *pf)
2274 struct i40e_hw *hw = &pf->hw;
2278 wr32(hw, I40E_PFINT_ITR0(0), 0);
2279 wr32(hw, I40E_PFINT_ITR0(1), 0);
2282 /* Setup "other" causes */
2283 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2284 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2285 | I40E_PFINT_ICR0_ENA_GRST_MASK
2286 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2287 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2288 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2289 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2290 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2291 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2292 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2294 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2296 /* SW_ITR_IDX = 0, but don't change INTENA */
2297 wr32(hw, I40E_PFINT_DYN_CTL0,
2298 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2299 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2300 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2301 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2303 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2304 wr32(hw, I40E_PFINT_LNKLST0, 0);
2306 /* Associate the queue pair to the vector and enable the q int */
2307 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2308 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2309 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2310 wr32(hw, I40E_QINT_RQCTL(0), reg);
2312 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2313 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2314 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2315 wr32(hw, I40E_QINT_TQCTL(0), reg);
2317 /* Next enable the queue pair */
2318 reg = rd32(hw, I40E_QTX_ENA(0));
2319 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2320 wr32(hw, I40E_QTX_ENA(0), reg);
2322 reg = rd32(hw, I40E_QRX_ENA(0));
2323 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2324 wr32(hw, I40E_QRX_ENA(0), reg);
2329 * Set the Initial ITR state
/*
 * ixl_configure_itr - set the initial interrupt throttling (ITR)
 * state from the ixl_rx_itr/ixl_tx_itr tunables, optionally flagging
 * the settings dynamic, and program every queue's RX and TX ITR
 * registers accordingly.
 */
2332 ixl_configure_itr(struct ixl_pf *pf)
2334 struct i40e_hw *hw = &pf->hw;
2335 struct ixl_vsi *vsi = &pf->vsi;
2336 struct ixl_queue *que = vsi->queues;
2338 vsi->rx_itr_setting = ixl_rx_itr;
2339 if (ixl_dynamic_rx_itr)
2340 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2341 vsi->tx_itr_setting = ixl_tx_itr;
2342 if (ixl_dynamic_tx_itr)
2343 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2345 for (int i = 0; i < vsi->num_queues; i++, que++) {
2346 struct tx_ring *txr = &que->txr;
2347 struct rx_ring *rxr = &que->rxr;
2349 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2350 vsi->rx_itr_setting);
2351 rxr->itr = vsi->rx_itr_setting;
2352 rxr->latency = IXL_AVE_LATENCY;
2353 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2354 vsi->tx_itr_setting);
2355 txr->itr = vsi->tx_itr_setting;
2356 txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_allocate_pci_resources - map the device register BAR, populate
 * the osdep bus-space handles the shared i40e code uses for register
 * access, then negotiate MSI/MSI-X vectors via ixl_init_msix().
 * NOTE(review): listing is elided -- the rid setup and return lines
 * are missing from this excerpt.
 */
2362 ixl_allocate_pci_resources(struct ixl_pf *pf)
2365 device_t dev = pf->dev;
2368 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2371 if (!(pf->pci_mem)) {
2372 device_printf(dev,"Unable to allocate bus resource: memory\n");
2376 pf->osdep.mem_bus_space_tag =
2377 rman_get_bustag(pf->pci_mem);
2378 pf->osdep.mem_bus_space_handle =
2379 rman_get_bushandle(pf->pci_mem);
2380 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2381 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2382 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2384 pf->hw.back = &pf->osdep;
2387 ** Now setup MSI or MSI/X, should
2388 ** return us the number of supported
2389 ** vectors. (Will be 1 for MSI)
2391 pf->msix = ixl_init_msix(pf);
/*
 * ixl_free_pci_resources - tear down everything ixl_allocate_pci_
 * resources and the interrupt setup created: per-queue MSI-X handlers
 * and IRQ resources, the admin queue interrupt, MSI state, the MSI-X
 * table BAR, and the register BAR.  Safe to call before the stations
 * are set up.  NOTE(review): listing is elided -- some tag resets and
 * the early return are missing from this excerpt.
 */
2396 ixl_free_pci_resources(struct ixl_pf * pf)
2398 struct ixl_vsi *vsi = &pf->vsi;
2399 struct ixl_queue *que = vsi->queues;
2400 device_t dev = pf->dev;
2403 memrid = PCIR_BAR(IXL_BAR);
2405 /* We may get here before stations are setup */
2406 if ((!ixl_enable_msix) || (que == NULL))
2410 ** Release all msix VSI resources:
2412 for (int i = 0; i < vsi->num_queues; i++, que++) {
2413 rid = que->msix + 1;
2414 if (que->tag != NULL) {
2415 bus_teardown_intr(dev, que->res, que->tag);
2418 if (que->res != NULL)
2419 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2423 /* Clean the AdminQ interrupt last */
2424 if (pf->admvec) /* we are doing MSIX */
2425 rid = pf->admvec + 1;
2427 (pf->msix != 0) ? (rid = 1):(rid = 0);
2429 if (pf->tag != NULL) {
2430 bus_teardown_intr(dev, pf->res, pf->tag);
2433 if (pf->res != NULL)
2434 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2437 pci_release_msi(dev);
2439 if (pf->msix_mem != NULL)
2440 bus_release_resource(dev, SYS_RES_MEMORY,
2441 memrid, pf->msix_mem);
2443 if (pf->pci_mem != NULL)
2444 bus_release_resource(dev, SYS_RES_MEMORY,
2445 PCIR_BAR(0), pf->pci_mem);
/*
** ixl_add_ifmedia
**
** Translate the PHY-type capability bitmask reported by the
** firmware into ifmedia(4) entries on vsi->media. The
** IFM_ETH_XTYPE section maps backplane/extended types onto the
** closest legacy media words on older kernels that lack the
** dedicated defines.
*/
2451 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2453 	/* Display supported media types */
2454 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2455 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2457 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2458 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2459 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2460 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2461 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2462 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
	/* 10G direct-attach / XAUI all present as twinax */
2464 	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2465 	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2466 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2467 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2469 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2470 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2471 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2472 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2473 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2474 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2476 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2477 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2478 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2479 	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2480 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2481 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2482 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2483 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2484 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2485 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
	/* Older kernels: approximate backplane/extended types with legacy media */
2487 #ifndef IFM_ETH_XTYPE
2488 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2489 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2491 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2492 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2493 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2494 	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2495 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2496 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2497 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2498 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2499 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2501 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2502 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2503 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2504 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	/* Newer kernels: exact media words are available */
2506 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2507 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2509 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2510 	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2511 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2512 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2513 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2514 	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2515 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2516 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2517 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2518 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2519 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2521 	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2522 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2524 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2525 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2526 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2527 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2531 /*********************************************************************
2533 * Setup networking device structure and register an interface.
2535 **********************************************************************/
/*
** ixl_setup_interface
**
** Allocate and initialize the ifnet for this VSI: entry points,
** capabilities, supported media (queried from firmware via the
** AdminQ), then attach to the network stack with ether_ifattach().
** NOTE(review): some lines (return type, braces, error returns)
** are elided in this extract.
*/
2537 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2540 	struct i40e_hw *hw = vsi->hw;
2541 	struct ixl_queue *que = vsi->queues;
2542 	struct i40e_aq_get_phy_abilities_resp abilities;
2543 	enum i40e_status_code aq_error = 0;
2545 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2547 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2549 		device_printf(dev, "can not allocate ifnet structure\n");
2552 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2553 	ifp->if_mtu = ETHERMTU;
2554 	if_initbaudrate(ifp, IF_Gbps(40));
2555 	ifp->if_init = ixl_init;
2556 	ifp->if_softc = vsi;
2557 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2558 	ifp->if_ioctl = ixl_ioctl;
2560 #if __FreeBSD_version >= 1100036
2561 	if_setgetcounterfn(ifp, ixl_get_counter);
	/* Multiqueue transmit entry points */
2564 	ifp->if_transmit = ixl_mq_start;
2566 	ifp->if_qflush = ixl_qflush;
2568 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2570 	vsi->max_frame_size =
2571 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2572 	    + ETHER_VLAN_ENCAP_LEN;
2575 	 * Tell the upper layer(s) we support long frames.
2577 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2579 	ifp->if_capabilities |= IFCAP_HWCSUM;
2580 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2581 	ifp->if_capabilities |= IFCAP_TSO;
2582 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2583 	ifp->if_capabilities |= IFCAP_LRO;
2585 	/* VLAN capabilities */
2586 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2589 			     |  IFCAP_VLAN_HWCSUM;
2590 	ifp->if_capenable = ifp->if_capabilities;
2593 	** Don't turn this on by default, if vlans are
2594 	** created on another pseudo device (eg. lagg)
2595 	** then vlan events are not passed thru, breaking
2596 	** operation, but with HW FILTER off it works. If
2597 	** using vlans directly on the ixl driver you can
2598 	** enable this and get full hardware tag filtering.
2600 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2603 	 * Specify the media types supported by this adapter and register
2604 	 * callbacks to update media and link information
2606 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2609 	aq_error = i40e_aq_get_phy_capabilities(hw,
2610 	    FALSE, TRUE, &abilities, NULL);
2611 	/* May need delay to detect fiber correctly */
2612 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2613 		i40e_msec_delay(200);
2614 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2615 		    TRUE, &abilities, NULL);
2618 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2619 			device_printf(dev, "Unknown PHY type detected!\n");
2622 			    "Error getting supported media types, err %d,"
2623 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2627 		ixl_add_ifmedia(vsi, abilities.phy_type);
2629 	/* Use autoselect media by default */
2630 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2631 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2633 	ether_ifattach(ifp, hw->mac.addr);
2639 ** Run when the Admin Queue gets a
2640 ** link transition interrupt.
/*
** ixl_link_event
**
** Handle a link-state-change AdminQ event: refresh the link
** status from firmware, cache it in pf->link_up, and warn when
** link failed due to an unqualified module.
*/
2643 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2645 	struct i40e_hw *hw = &pf->hw;
2646 	struct i40e_aqc_get_link_status *status =
2647 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	/* Force the shared code to requery link info from firmware */
2650 	hw->phy.get_link_info = TRUE;
2651 	i40e_get_link_status(hw, &check);
2652 	pf->link_up = check;
2654 	printf("Link is %s\n", check ? "up":"down");
2656 	/* Report if Unqualified modules are found */
2657 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2658 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2659 	    (!(status->link_info & I40E_AQ_LINK_UP)))
2660 		device_printf(pf->dev, "Link failed because "
2661 		    "an unqualified module was detected\n");
2666 /*********************************************************************
2668 * Get Firmware Switch configuration
2669 * - this will need to be more robust when more complex
2670 * switch configurations are enabled.
2672 **********************************************************************/
/*
** ixl_switch_config
**
** Query the firmware switch configuration via the AdminQ and
** record the uplink/downlink/element SEIDs of the first (only)
** element into the VSI. See the banner above: assumes a single
** VSI for now.
*/
2674 ixl_switch_config(struct ixl_pf *pf)
2676 	struct i40e_hw	*hw = &pf->hw;
2677 	struct ixl_vsi	*vsi = &pf->vsi;
2678 	device_t 	*dev = vsi->dev;
2679 	struct i40e_aqc_get_switch_config_resp *sw_config;
2680 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2684 	memset(&aq_buf, 0, sizeof(aq_buf));
2685 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2686 	ret = i40e_aq_get_switch_config(hw, sw_config,
2687 	    sizeof(aq_buf), &next, NULL);
2689 		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
	/* Debug dump of all reported switch elements */
2695 	    "Switch config: header reported: %d in structure, %d total\n",
2696 	    sw_config->header.num_reported, sw_config->header.num_total);
2697 	for (int i = 0; i < sw_config->header.num_reported; i++) {
2699 		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2700 		    sw_config->element[i].element_type,
2701 		    sw_config->element[i].seid,
2702 		    sw_config->element[i].uplink_seid,
2703 		    sw_config->element[i].downlink_seid);
2706 	/* Simplified due to a single VSI at the moment */
2707 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2708 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2709 	vsi->seid = sw_config->element[0].seid;
2713 /*********************************************************************
2715 * Initialize the VSI: this handles contexts, which means things
2716 * like the number of descriptors, buffer size,
2717 * plus we init the rings thru this function.
2719 **********************************************************************/
/*
** ixl_initialize_vsi
**
** Fetch and update the VSI context (queue mapping, VLAN stripping
** mode), then program an HMC TX and RX context for every queue
** and (re)initialize the rings. See the banner comment above.
** NOTE(review): several lines (return type, braces, error paths,
** the DEV_NETMAP #ifdef opener) are elided in this extract.
*/
2721 ixl_initialize_vsi(struct ixl_vsi *vsi)
2723 	struct ixl_pf		*pf = vsi->back;
2724 	struct ixl_queue	*que = vsi->queues;
2725 	device_t		dev = vsi->dev;
2726 	struct i40e_hw		*hw = vsi->hw;
2727 	struct i40e_vsi_context	ctxt;
2730 	memset(&ctxt, 0, sizeof(ctxt));
2731 	ctxt.seid = vsi->seid;
2732 	if (pf->veb_seid != 0)
2733 		ctxt.uplink_seid = pf->veb_seid;
2734 	ctxt.pf_num = hw->pf_id;
2735 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2737 		device_printf(dev,"get vsi params failed %x!!\n", err);
	/* Debug dump of the VSI context returned by firmware */
2741 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2742 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2743 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2744 	    ctxt.uplink_seid, ctxt.vsi_number,
2745 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2746 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2747 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2750 	** Set the queue and traffic class bits
2751 	**  - when multiple traffic classes are supported
2752 	**    this will need to be more robust.
2754 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2755 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2756 	ctxt.info.queue_mapping[0] = 0;
2757 	ctxt.info.tc_mapping[0] = 0x0800;
2759 	/* Set VLAN receive stripping mode */
2760 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2761 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2762 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2763 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2765 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2767 	/* Keep copy of VSI info in VSI for statistic counters */
2768 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2770 	/* Reset VSI statistics */
2771 	ixl_vsi_reset_stats(vsi);
2772 	vsi->hw_filters_add = 0;
2773 	vsi->hw_filters_del = 0;
2775 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2777 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2779 		device_printf(dev,"update vsi params failed %x!!\n",
2780 		   hw->aq.asq_last_status);
	/* Program HMC TX/RX contexts and init rings, one queue at a time */
2784 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2785 		struct tx_ring		*txr = &que->txr;
2786 		struct rx_ring 		*rxr = &que->rxr;
2787 		struct i40e_hmc_obj_txq tctx;
2788 		struct i40e_hmc_obj_rxq rctx;
2793 		/* Setup the HMC TX Context  */
2794 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2795 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2796 		tctx.new_context = 1;
2797 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2798 		tctx.qlen = que->num_desc;
2800 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2801 		/* Enable HEAD writeback */
2802 		tctx.head_wb_ena = 1;
		/* HEAD writeback area lives just past the descriptor ring */
2803 		tctx.head_wb_addr = txr->dma.pa +
2804 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2805 		tctx.rdylist_act = 0;
2806 		err = i40e_clear_lan_tx_queue_context(hw, i);
2808 			device_printf(dev, "Unable to clear TX context\n");
2811 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2813 			device_printf(dev, "Unable to set TX context\n");
2816 		/* Associate the ring with this PF */
2817 		txctl = I40E_QTX_CTL_PF_QUEUE;
2818 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2819 		    I40E_QTX_CTL_PF_INDX_MASK);
2820 		wr32(hw, I40E_QTX_CTL(i), txctl);
2823 		/* Do ring (re)init */
2824 		ixl_init_tx_ring(que);
2826 		/* Next setup the HMC RX Context  */
2827 		if (vsi->max_frame_size <= MCLBYTES)
2828 			rxr->mbuf_sz = MCLBYTES;
2830 			rxr->mbuf_sz = MJUMPAGESIZE;
		/* Largest receive the HW may chain given the buffer size */
2832 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2834 		/* Set up an RX context for the HMC */
2835 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2836 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2837 		/* ignore header split for now */
2838 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2839 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2840 		    vsi->max_frame_size : max_rxmax;
2842 		rctx.dsize = 1;	/* do 32byte descriptors */
2843 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2844 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2845 		rctx.qlen = que->num_desc;
2846 		rctx.tphrdesc_ena = 1;
2847 		rctx.tphwdesc_ena = 1;
2848 		rctx.tphdata_ena = 0;
2849 		rctx.tphhead_ena = 0;
2850 		rctx.lrxqthresh = 2;
2857 		err = i40e_clear_lan_rx_queue_context(hw, i);
2860 			    "Unable to clear RX context %d\n", i);
2863 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2865 			device_printf(dev, "Unable to set RX context %d\n", i);
2868 		err = ixl_init_rx_ring(que);
2870 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2873 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2875 		/* preserve queue */
		/* With netmap active, leave room per the netmap kring state */
2876 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2877 			struct netmap_adapter *na = NA(vsi->ifp);
2878 			struct netmap_kring *kring = &na->rx_rings[i];
2879 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2880 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2882 #endif /* DEV_NETMAP */
2883 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2889 /*********************************************************************
2891 * Free all VSI structs.
2893 **********************************************************************/
/*
** ixl_free_vsi
**
** Free all per-queue TX/RX soft structures, descriptor DMA
** memory and locks, then the queue array and the MAC filter
** list. Tolerates partially-initialized queues (checks
** mtx_initialized before touching a ring).
*/
2895 ixl_free_vsi(struct ixl_vsi *vsi)
2897 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2898 	struct ixl_queue	*que = vsi->queues;
2900 	/* Free station queues */
2901 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2902 		struct tx_ring *txr = &que->txr;
2903 		struct rx_ring *rxr = &que->rxr;
2905 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2908 		ixl_free_que_tx(que);
2910 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2912 		IXL_TX_LOCK_DESTROY(txr);
2914 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2917 		ixl_free_que_rx(que);
2919 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2921 		IXL_RX_LOCK_DESTROY(rxr);
2924 	free(vsi->queues, M_DEVBUF);
2926 	/* Free VSI filter list */
2927 	ixl_free_mac_filters(vsi);
/*
** ixl_free_mac_filters
**
** Drain the VSI's MAC filter list, removing each entry from the
** head of the SLIST until the list is empty.
*/
2931 ixl_free_mac_filters(struct ixl_vsi *vsi)
2933 	struct ixl_mac_filter *f;
2935 	while (!SLIST_EMPTY(&vsi->ftl)) {
2936 		f = SLIST_FIRST(&vsi->ftl);
2937 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2943 /*********************************************************************
2945 * Allocate memory for the VSI (virtual station interface) and their
2946 * associated queues, rings and the descriptors associated with each,
2947 * called only once at attach.
2949 **********************************************************************/
/*
** ixl_setup_stations
**
** Allocate the VSI queue array and, for each queue, the TX/RX
** locks, descriptor DMA rings, soft state and the TX buf_ring.
** Called once at attach. On failure the tail of the function
** unwinds the per-queue DMA allocations.
** NOTE(review): several lines (braces, error gotos, some
** assignments such as que->me and txr/rxr back-pointers) are
** elided in this extract.
*/
2951 ixl_setup_stations(struct ixl_pf *pf)
2953 	device_t		dev = pf->dev;
2954 	struct ixl_vsi		*vsi;
2955 	struct ixl_queue	*que;
2956 	struct tx_ring		*txr;
2957 	struct rx_ring		*rxr;
2959 	int 			error = I40E_SUCCESS;
2962 	vsi->back = (void *)pf;
2968 	/* Get memory for the station queues */
2970 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2971 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2972 		device_printf(dev, "Unable to allocate queue memory\n");
2977 	for (int i = 0; i < vsi->num_queues; i++) {
2978 		que = &vsi->queues[i];
2979 		que->num_desc = ixl_ringsz;
2982 		/* mark the queue as active */
2983 		vsi->active_queues |= (u64)1 << que->me;
2986 		txr->tail = I40E_QTX_TAIL(que->me);
2988 		/* Initialize the TX lock */
2989 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2990 		    device_get_nameunit(dev), que->me);
2991 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2992 		/* Create the TX descriptor ring */
		/* extra u32 covers the HEAD-writeback word after the ring */
2993 		tsize = roundup2((que->num_desc *
2994 		    sizeof(struct i40e_tx_desc)) +
2995 		    sizeof(u32), DBA_ALIGN);
2996 		if (i40e_allocate_dma_mem(&pf->hw,
2997 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2999 			    "Unable to allocate TX Descriptor memory\n");
3003 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3004 		bzero((void *)txr->base, tsize);
3005 		/* Now allocate transmit soft structs for the ring */
3006 		if (ixl_allocate_tx_data(que)) {
3008 			    "Critical Failure setting up TX structures\n");
3012 		/* Allocate a buf ring */
3013 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3014 		    M_WAITOK, &txr->mtx);
3015 		if (txr->br == NULL) {
3017 			    "Critical Failure setting up TX buf ring\n");
3023 		 * Next the RX queues...
3025 		rsize = roundup2(que->num_desc *
3026 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3029 		rxr->tail = I40E_QRX_TAIL(que->me);
3031 		/* Initialize the RX side lock */
3032 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3033 		    device_get_nameunit(dev), que->me);
3034 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3036 		if (i40e_allocate_dma_mem(&pf->hw,
3037 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3039 			    "Unable to allocate RX Descriptor memory\n");
3043 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3044 		bzero((void *)rxr->base, rsize);
3046 		/* Allocate receive soft structs for the ring*/
3047 		if (ixl_allocate_rx_data(que)) {
3049 			    "Critical Failure setting up receive structs\n");
	/* Error unwind: release DMA rings for queues set up so far */
3058 	for (int i = 0; i < vsi->num_queues; i++) {
3059 		que = &vsi->queues[i];
3063 		i40e_free_dma_mem(&pf->hw, &rxr->dma);
3065 		i40e_free_dma_mem(&pf->hw, &txr->dma);
3073 ** Provide a update to the queue RX
3074 ** interrupt moderation value.
/*
** ixl_set_queue_rx_itr
**
** Adjust the RX interrupt throttle rate for one queue. In
** dynamic mode, classify the average bytes-per-interrupt into a
** latency band, smooth the new ITR exponentially, and write it
** to the PFINT_ITRN register; otherwise sync the hardware to the
** static per-VSI setting.
*/
3077 ixl_set_queue_rx_itr(struct ixl_queue *que)
3079 	struct ixl_vsi	*vsi = que->vsi;
3080 	struct i40e_hw	*hw = vsi->hw;
3081 	struct rx_ring	*rxr = &que->rxr;
3087 	/* Idle, do nothing */
3088 	if (rxr->bytes == 0)
3091 	if (ixl_dynamic_rx_itr) {
		/* average bytes per interrupt interval */
3092 		rx_bytes = rxr->bytes/rxr->itr;
3095 		/* Adjust latency range */
3096 		switch (rxr->latency) {
3097 		case IXL_LOW_LATENCY:
3098 			if (rx_bytes > 10) {
3099 				rx_latency = IXL_AVE_LATENCY;
3100 				rx_itr = IXL_ITR_20K;
3103 		case IXL_AVE_LATENCY:
3104 			if (rx_bytes > 20) {
3105 				rx_latency = IXL_BULK_LATENCY;
3106 				rx_itr = IXL_ITR_8K;
3107 			} else if (rx_bytes <= 10) {
3108 				rx_latency = IXL_LOW_LATENCY;
3109 				rx_itr = IXL_ITR_100K;
3112 		case IXL_BULK_LATENCY:
3113 			if (rx_bytes <= 20) {
3114 				rx_latency = IXL_AVE_LATENCY;
3115 				rx_itr = IXL_ITR_20K;
3120 		rxr->latency = rx_latency;
3122 		if (rx_itr != rxr->itr) {
3124 			rx_itr = (10 * rx_itr * rxr->itr) /
3125 			    ((9 * rx_itr) + rxr->itr);
3126 			rxr->itr = rx_itr & IXL_MAX_ITR;
3127 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3128 			    que->me), rxr->itr);
3130 	} else { /* We may have toggled to non-dynamic */
3131 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3132 			vsi->rx_itr_setting = ixl_rx_itr;
3133 		/* Update the hardware if needed */
3134 		if (rxr->itr != vsi->rx_itr_setting) {
3135 			rxr->itr = vsi->rx_itr_setting;
3136 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3137 			    que->me), rxr->itr);
3147 ** Provide a update to the queue TX
3148 ** interrupt moderation value.
/*
** ixl_set_queue_tx_itr
**
** TX counterpart of ixl_set_queue_rx_itr: adjust the TX
** interrupt throttle rate for one queue, either dynamically from
** the bytes-per-interrupt latency bands or from the static
** per-VSI setting.
*/
3151 ixl_set_queue_tx_itr(struct ixl_queue *que)
3153 	struct ixl_vsi	*vsi = que->vsi;
3154 	struct i40e_hw	*hw = vsi->hw;
3155 	struct tx_ring	*txr = &que->txr;
3161 	/* Idle, do nothing */
3162 	if (txr->bytes == 0)
3165 	if (ixl_dynamic_tx_itr) {
		/* average bytes per interrupt interval */
3166 		tx_bytes = txr->bytes/txr->itr;
3169 		switch (txr->latency) {
3170 		case IXL_LOW_LATENCY:
3171 			if (tx_bytes > 10) {
3172 				tx_latency = IXL_AVE_LATENCY;
3173 				tx_itr = IXL_ITR_20K;
3176 		case IXL_AVE_LATENCY:
3177 			if (tx_bytes > 20) {
3178 				tx_latency = IXL_BULK_LATENCY;
3179 				tx_itr = IXL_ITR_8K;
3180 			} else if (tx_bytes <= 10) {
3181 				tx_latency = IXL_LOW_LATENCY;
3182 				tx_itr = IXL_ITR_100K;
3185 		case IXL_BULK_LATENCY:
3186 			if (tx_bytes <= 20) {
3187 				tx_latency = IXL_AVE_LATENCY;
3188 				tx_itr = IXL_ITR_20K;
3193 		txr->latency = tx_latency;
3195 		if (tx_itr != txr->itr) {
3197 			tx_itr = (10 * tx_itr * txr->itr) /
3198 			    ((9 * tx_itr) + txr->itr);
3199 			txr->itr = tx_itr & IXL_MAX_ITR;
3200 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3201 			    que->me), txr->itr);
3204 	} else { /* We may have toggled to non-dynamic */
3205 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3206 			vsi->tx_itr_setting = ixl_tx_itr;
3207 		/* Update the hardware if needed */
3208 		if (txr->itr != vsi->tx_itr_setting) {
3209 			txr->itr = vsi->tx_itr_setting;
3210 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3211 			    que->me), txr->itr);
3219 #define QUEUE_NAME_LEN 32
/*
** ixl_add_vsi_sysctls
**
** Create a sysctl node named sysctl_name under the device's
** sysctl tree for this VSI and hang its ethernet statistics
** underneath it.
*/
3222 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3223 	struct sysctl_ctx_list *ctx, const char *sysctl_name)
3225 	struct sysctl_oid *tree;
3226 	struct sysctl_oid_list *child;
3227 	struct sysctl_oid_list *vsi_list;
3229 	tree = device_get_sysctl_tree(pf->dev);
3230 	child = SYSCTL_CHILDREN(tree);
3231 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3232 				   CTLFLAG_RD, NULL, "VSI Number");
3233 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3235 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
/*
** ixl_add_hw_stats
**
** Register all driver statistics sysctls: global driver counters,
** the PF VSI node, one node per queue with its TX/RX counters,
** and the port-level MAC statistics.
*/
3239 ixl_add_hw_stats(struct ixl_pf *pf)
3241 	device_t dev = pf->dev;
3242 	struct ixl_vsi *vsi = &pf->vsi;
3243 	struct ixl_queue *queues = vsi->queues;
3244 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3246 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3247 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3248 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3249 	struct sysctl_oid_list *vsi_list;
3251 	struct sysctl_oid *queue_node;
3252 	struct sysctl_oid_list *queue_list;
3254 	struct tx_ring *txr;
3255 	struct rx_ring *rxr;
3256 	char queue_namebuf[QUEUE_NAME_LEN];
3258 	/* Driver statistics */
3259 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3260 			CTLFLAG_RD, &pf->watchdog_events,
3261 			"Watchdog timeouts");
3262 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3263 			CTLFLAG_RD, &pf->admin_irq,
3264 			"Admin Queue IRQ Handled");
3266 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3267 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3269 	/* Queue statistics */
3270 	for (int q = 0; q < vsi->num_queues; q++) {
3271 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3272 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3273 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3274 		queue_list = SYSCTL_CHILDREN(queue_node);
3276 		txr = &(queues[q].txr);
3277 		rxr = &(queues[q].rxr);
3279 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3280 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3281 				"m_defrag() failed");
3282 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3283 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3284 				"Driver dropped packets");
3285 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3286 				CTLFLAG_RD, &(queues[q].irqs),
3287 				"irqs on this queue");
3288 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3289 				CTLFLAG_RD, &(queues[q].tso),
3291 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3292 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3293 				"Driver tx dma failure in xmit");
3294 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3295 				CTLFLAG_RD, &(txr->no_desc),
3296 				"Queue No Descriptor Available");
3297 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3298 				CTLFLAG_RD, &(txr->total_packets),
3299 				"Queue Packets Transmitted");
3300 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3301 				CTLFLAG_RD, &(txr->tx_bytes),
3302 				"Queue Bytes Transmitted");
3303 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3304 				CTLFLAG_RD, &(rxr->rx_packets),
3305 				"Queue Packets Received");
3306 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3307 				CTLFLAG_RD, &(rxr->rx_bytes),
3308 				"Queue Bytes Received");
	/* MAC (port-level) stats */
3312 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
** ixl_add_sysctls_eth_stats
**
** Register read-only sysctls for a set of i40e ethernet
** statistics, driven by a NULL-terminated table mapping each
** counter to a sysctl name and description.
*/
3316 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3317 	struct sysctl_oid_list *child,
3318 	struct i40e_eth_stats *eth_stats)
3320 	struct ixl_sysctl_info ctls[] =
3322 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3323 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3324 			"Unicast Packets Received"},
3325 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3326 			"Multicast Packets Received"},
3327 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3328 			"Broadcast Packets Received"},
3329 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3330 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3331 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3332 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3333 			"Multicast Packets Transmitted"},
3334 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3335 			"Broadcast Packets Transmitted"},
	/* Walk the table until the NULL sentinel entry */
3340 	struct ixl_sysctl_info *entry = ctls;
3341 	while (entry->stat != 0)
3343 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3344 				CTLFLAG_RD, entry->stat,
3345 				entry->description);
/*
** ixl_add_sysctls_mac_stats
**
** Create a "mac" sysctl node, register the port's ethernet stats
** under it, then register the remaining MAC/port counters (error,
** size-histogram and flow-control stats) from a sentinel-
** terminated table.
*/
3351 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3352 	struct sysctl_oid_list *child,
3353 	struct i40e_hw_port_stats *stats)
3355 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3356 				    CTLFLAG_RD, NULL, "Mac Statistics");
3357 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3359 	struct i40e_eth_stats *eth_stats = &stats->eth;
3360 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3362 	struct ixl_sysctl_info ctls[] =
3364 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3365 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3366 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3367 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3368 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3369 		/* Packet Reception Stats */
3370 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3371 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3372 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3373 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3374 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3375 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3376 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3377 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3378 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3379 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3380 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3381 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3382 		/* Packet Transmission Stats */
3383 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3384 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3385 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3386 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3387 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3388 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3389 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3391 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3392 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3393 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3394 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
	/* Walk the table until the NULL sentinel entry */
3399 	struct ixl_sysctl_info *entry = ctls;
3400 	while (entry->stat != 0)
3402 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3403 				CTLFLAG_RD, entry->stat,
3404 				entry->description);
3411 ** ixl_config_rss - setup RSS
3412 ** - note this is done for the single vsi
/*
** ixl_config_rss - setup RSS for the single VSI (see banner above):
** program the hash key (from rss_config(4) when RSS is compiled
** in, otherwise a fixed seed), enable the packet-classifier hash
** types in PFQF_HENA, and fill the indirection LUT round-robin
** across the VSI's queues.
** NOTE(review): the #ifdef RSS / #else / #endif framing lines are
** elided in this extract; the two rss_seed declarations belong to
** the two branches.
*/
3414 static void ixl_config_rss(struct ixl_vsi *vsi)
3416 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3417 	struct i40e_hw	*hw = vsi->hw;
3419 	u64		set_hena = 0, hena;
3422 	u32		rss_hash_config;
3423 	u32		rss_seed[IXL_KEYSZ];
	/* non-RSS build: fixed hash seed */
3425 	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3426 	    0x183cfd8c, 0xce880440, 0x580cbc3c,
3427 	    0x35897377, 0x328b25e1, 0x4fa98922,
3428 	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3432 	/* Fetch the configured RSS key */
3433 	rss_getkey((uint8_t *) &rss_seed);
3436 	/* Fill out hash function seed */
3437 	for (i = 0; i < IXL_KEYSZ; i++)
3438 		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3440 	/* Enable PCTYPES for RSS: */
	/* RSS build: honor the stack's configured hash types */
3442 	rss_hash_config = rss_gethashconfig();
3443 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3444                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3445 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3446                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3447 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3448                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3449 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3450                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3451 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3452 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3453 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3454                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3455         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3456                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
	/* non-RSS build: enable all supported hash types */
3459 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3460 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3461 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3462 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3463 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3464 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3465 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3466 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3467 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3468 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3469 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
	/* HENA is a 64-bit value split across two 32-bit registers */
3471 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3472 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3474 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3475 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3477 	/* Populate the LUT with max no. of queues in round robin fashion */
3478 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3479 		if (j == vsi->num_queues)
3483 		* Fetch the RSS bucket id for the given indirection entry.
3484 		* Cap it at the number of configured buckets (which is
3487 		que_id = rss_get_indirection_to_bucket(i);
3488 		que_id = que_id % vsi->num_queues;
3492 		/* lut = 4-byte sliding window of 4 lut entries */
3493 		lut = (lut << 8) | (que_id &
3494 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3495 		/* On i = 3, we have 4 entries in lut; write to the register */
3497 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3504 ** This routine is run via an vlan config EVENT,
3505 ** it enables us to use the HW Filter table since
3506 ** we can get the vlan id. This just creates the
3507 ** entry in the soft version of the VFTA, init will
3508 ** repopulate the real table.
/*
** ixl_register_vlan
**
** VLAN-config event handler (see banner above): validate the
** event target and tag, then add a MAC+VLAN filter for the
** device MAC so the soft VFTA can be repopulated on init.
*/
3511 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3513 	struct ixl_vsi	*vsi = ifp->if_softc;
3514 	struct i40e_hw	*hw = vsi->hw;
3515 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3517 	if (ifp->if_softc !=  arg)   /* Not our event */
3520 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3525 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3530 ** This routine is run via an vlan
3531 ** unconfig EVENT, remove our entry
3532 ** in the soft vfta.
/*
 * ixl_unregister_vlan -- VLAN unregistration event handler.
 *
 * Mirror of ixl_register_vlan(): validates the event is ours and that
 * vtag is in 1..4095, then removes the matching MAC+VLAN filter.
 * NOTE(review): early-return bodies and locking lines are not visible
 * in this excerpt -- verify against the full source.
 */
3535 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3537 struct ixl_vsi *vsi = ifp->if_softc;
3538 struct i40e_hw *hw = vsi->hw;
3539 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3541 if (ifp->if_softc != arg)
3544 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Drop the soft filter entry; HW removal happens in ixl_del_filter */
3549 ixl_del_filter(vsi, hw->mac.addr, vtag);
3554 ** This routine updates vlan filters, called by init
3555 ** it scans the filter table and then updates the hw
3556 ** after a soft reset.
/*
 * ixl_setup_vlan_filters -- re-program VLAN filters after (re)init.
 *
 * Early-out when the VSI has no VLANs registered; otherwise walks the
 * soft filter list, and pushes all IXL_FILTER_VLAN entries to HW via
 * ixl_add_hw_filters() marked ADD|USED.
 * NOTE(review): the counting of matched entries ('cnt') and the
 * no-filters early return are only partially visible here.
 */
3559 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3561 struct ixl_mac_filter *f;
3564 if (vsi->num_vlans == 0)
3567 ** Scan the filter list for vlan entries,
3568 ** mark them for addition and then call
3569 ** for the AQ update.
3571 SLIST_FOREACH(f, &vsi->ftl, next) {
3572 if (f->flags & IXL_FILTER_VLAN) {
3580 printf("setup vlan: no filters found!\n");
3583 flags = IXL_FILTER_VLAN;
3584 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3585 ixl_add_hw_filters(vsi, flags, cnt);
3590 ** Initialize filter list and add filters that the hardware
3591 ** needs to know about.
/*
 * ixl_init_filters -- seed the VSI filter table with the mandatory
 * broadcast filter (any VLAN). Additional filters are added later by
 * the VLAN/multicast paths.
 */
3594 ixl_init_filters(struct ixl_vsi *vsi)
3596 /* Add broadcast address */
3597 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3601 ** This routine adds mulicast filters
/*
 * ixl_add_mc_filter -- add a multicast MAC filter to the soft list.
 *
 * macaddr: 6-byte multicast MAC address.
 *
 * If a matching filter already exists (any VLAN) nothing is added.
 * Otherwise a filter slot is taken from ixl_get_filter() and marked
 * ADD|USED (plus, per the visible trailing '|', at least one more flag
 * -- likely the multicast flag; the continuation line is not visible
 * in this excerpt). HW programming is deferred to ixl_add_hw_filters().
 */
3604 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3606 struct ixl_mac_filter *f;
3608 /* Does one already exist */
3609 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3613 f = ixl_get_filter(vsi);
3615 printf("WARNING: no filter available!!\n");
3618 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3619 f->vlan = IXL_VLAN_ANY;
3620 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/*
 * ixl_reconfigure_filters -- re-push every USED soft filter to the
 * hardware, e.g. after a reset wiped the HW filter table.
 */
3627 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3630 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3634 ** This routine adds macvlan filters
/*
 * ixl_add_filter -- add a MAC+VLAN filter for this VSI.
 *
 * macaddr: 6-byte MAC address.
 * vlan:    VLAN id, or IXL_VLAN_ANY for a VLAN-agnostic filter.
 *
 * Duplicate-safe: returns early when ixl_find_filter() hits. When the
 * first real VLAN is registered, the IXL_VLAN_ANY filter for this MAC
 * is replaced by a vlan-0 filter so untagged traffic still matches.
 * Finally the new entry is marked ADD|USED (plus VLAN when tagged) and
 * pushed to HW with ixl_add_hw_filters(vsi, flags, 1).
 * NOTE(review): early returns and the f->vlan assignment line are not
 * visible in this excerpt.
 */
3637 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3639 struct ixl_mac_filter *f, *tmp;
3643 DEBUGOUT("ixl_add_filter: begin");
3648 /* Does one already exist */
3649 f = ixl_find_filter(vsi, macaddr, vlan);
3653 ** Is this the first vlan being registered, if so we
3654 ** need to remove the ANY filter that indicates we are
3655 ** not in a vlan, and replace that with a 0 filter.
3657 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3658 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3660 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3661 ixl_add_filter(vsi, macaddr, 0);
3665 f = ixl_get_filter(vsi);
3667 device_printf(dev, "WARNING: no filter available!!\n");
3670 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3672 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3673 if (f->vlan != IXL_VLAN_ANY)
3674 f->flags |= IXL_FILTER_VLAN;
3678 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * ixl_del_filter -- remove a MAC+VLAN filter from this VSI.
 *
 * Looks up the exact (macaddr, vlan) entry, marks it IXL_FILTER_DEL
 * and removes it from HW via ixl_del_hw_filters(). When the last VLAN
 * disappears, the vlan-0 filter is swapped back to an IXL_VLAN_ANY
 * filter -- the inverse of the transition done in ixl_add_filter().
 * NOTE(review): the not-found early return is not visible here.
 */
3683 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3685 struct ixl_mac_filter *f;
3687 f = ixl_find_filter(vsi, macaddr, vlan);
3691 f->flags |= IXL_FILTER_DEL;
3692 ixl_del_hw_filters(vsi, 1);
3695 /* Check if this is the last vlan removal */
3696 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3697 /* Switch back to a non-vlan filter */
3698 ixl_del_filter(vsi, macaddr, 0);
3699 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3705 ** Find the filter with both matching mac addr and vlan id
/*
 * ixl_find_filter -- linear search of the VSI soft filter list.
 *
 * Returns the entry whose MAC matches (cmp_etheraddr) AND whose vlan
 * equals 'vlan'; NULL when no such entry exists. O(n) in list length.
 */
3707 static struct ixl_mac_filter *
3708 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3710 struct ixl_mac_filter *f;
3713 SLIST_FOREACH(f, &vsi->ftl, next) {
3714 if (!cmp_etheraddr(f->macaddr, macaddr))
3716 if (f->vlan == vlan) {
3728 ** This routine takes additions to the vsi filter
3729 ** table and creates an Admin Queue call to create
3730 ** the filters in the hardware.
/*
 * ixl_add_hw_filters -- batch-program pending soft filters into HW.
 *
 * flags: exact flag combination identifying which soft entries to send
 *        (note the comparison is '==', not '&' -- only entries whose
 *        flags match exactly are picked up).
 * cnt:   number of matching entries; sizes the AQ element array.
 *
 * Requires the PF lock (IXL_PF_LOCK_ASSERT). Allocates a temporary
 * i40e_aqc_add_macvlan_element_data array (M_NOWAIT), fills one element
 * per filter, clears each entry's IXL_FILTER_ADD bit, then issues
 * i40e_aq_add_macvlan(). On success hw_filters_add is bumped by the
 * number sent.
 * NOTE(review): the malloc NULL check, loop counter 'j' handling, and
 * the free() of the array are not visible in this excerpt.
 */
3733 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3735 struct i40e_aqc_add_macvlan_element_data *a, *b;
3736 struct ixl_mac_filter *f;
3745 IXL_PF_LOCK_ASSERT(pf);
3747 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3748 M_DEVBUF, M_NOWAIT | M_ZERO);
3750 device_printf(dev, "add_hw_filters failed to get memory\n");
3755 ** Scan the filter list, each time we find one
3756 ** we add it to the admin queue array and turn off
3759 SLIST_FOREACH(f, &vsi->ftl, next) {
3760 if (f->flags == flags) {
3761 b = &a[j]; // a pox on fvl long names :)
3762 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3763 if (f->vlan == IXL_VLAN_ANY) {
3765 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3767 b->vlan_tag = f->vlan;
3770 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3771 f->flags &= ~IXL_FILTER_ADD;
3778 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3780 device_printf(dev, "aq_add_macvlan err %d, "
3781 "aq_error %d\n", err, hw->aq.asq_last_status);
3783 vsi->hw_filters_add += j;
3790 ** This routine takes removals in the vsi filter
3791 ** table and creates an Admin Queue call to delete
3792 ** the filters in the hardware.
/*
 * ixl_del_hw_filters -- batch-remove DEL-marked filters from HW.
 *
 * cnt: number of entries expected to carry IXL_FILTER_DEL; sizes the
 *      AQ element array.
 *
 * Walks the soft list with SLIST_FOREACH_SAFE (entries are unlinked
 * during iteration), builds one remove element per DEL-flagged filter
 * (IXL_VLAN_ANY is sent as vlan tag 0), then issues
 * i40e_aq_remove_macvlan(). ENOENT from the AQ is deliberately ignored
 * (see inline note). On other errors, per-element error codes are used
 * to count how many actually succeeded; on success hw_filters_del is
 * bumped by the full batch size.
 * NOTE(review): the malloc NULL check, freeing of 'd' and of each
 * removed soft entry, and counter declarations are not visible here.
 */
3795 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3797 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3801 struct ixl_mac_filter *f, *f_temp;
3804 DEBUGOUT("ixl_del_hw_filters: begin\n");
3810 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3811 M_DEVBUF, M_NOWAIT | M_ZERO);
3813 printf("del hw filter failed to get memory\n");
3817 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3818 if (f->flags & IXL_FILTER_DEL) {
3819 e = &d[j]; // a pox on fvl long names :)
3820 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3821 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3822 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3823 /* delete entry from vsi list */
3824 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3832 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3833 /* NOTE: returns ENOENT every time but seems to work fine,
3834 so we'll ignore that specific error. */
3835 // TODO: Does this still occur on current firmwares?
3836 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3838 for (int i = 0; i < j; i++)
3839 sc += (!d[i].error_code);
3840 vsi->hw_filters_del += sc;
3842 "Failed to remove %d/%d filters, aq error %d\n",
3843 j - sc, j, hw->aq.asq_last_status);
3845 vsi->hw_filters_del += j;
3849 DEBUGOUT("ixl_del_hw_filters: end\n");
/*
 * ixl_enable_rings -- request-enable every TX and RX queue of the VSI.
 *
 * For each queue: set QENA_REQ in the QTX_ENA/QRX_ENA register, then
 * poll up to 10 times (10 ms apart) for the hardware to reflect
 * QENA_STAT. A queue that never shows the status bit is reported via
 * device_printf.
 * NOTE(review): the return value / error accumulation and the
 * declarations of 'reg' and 'index' are not visible in this excerpt.
 */
3854 ixl_enable_rings(struct ixl_vsi *vsi)
3856 struct ixl_pf *pf = vsi->back;
3857 struct i40e_hw *hw = &pf->hw;
3862 for (int i = 0; i < vsi->num_queues; i++) {
3863 index = vsi->first_queue + i;
3864 i40e_pre_tx_queue_cfg(hw, index, TRUE);
3866 reg = rd32(hw, I40E_QTX_ENA(index));
3867 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3868 I40E_QTX_ENA_QENA_STAT_MASK;
3869 wr32(hw, I40E_QTX_ENA(index), reg);
3870 /* Verify the enable took */
3871 for (int j = 0; j < 10; j++) {
3872 reg = rd32(hw, I40E_QTX_ENA(index));
3873 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3875 i40e_msec_delay(10);
3877 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3878 device_printf(pf->dev, "TX queue %d disabled!\n",
3883 reg = rd32(hw, I40E_QRX_ENA(index));
3884 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3885 I40E_QRX_ENA_QENA_STAT_MASK;
3886 wr32(hw, I40E_QRX_ENA(index), reg);
3887 /* Verify the enable took */
3888 for (int j = 0; j < 10; j++) {
3889 reg = rd32(hw, I40E_QRX_ENA(index));
3890 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3892 i40e_msec_delay(10);
3894 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3895 device_printf(pf->dev, "RX queue %d disabled!\n",
/*
 * ixl_disable_rings -- request-disable every TX and RX queue of the
 * VSI. Mirror of ixl_enable_rings(): clear QENA_REQ, then poll up to
 * 10 times (10 ms apart) waiting for QENA_STAT to drop; a queue that
 * stays enabled is reported via device_printf. The 500 us delay after
 * i40e_pre_tx_queue_cfg() gives the HW time to quiesce the TX queue.
 * NOTE(review): return-value handling and local declarations are not
 * visible in this excerpt.
 */
3905 ixl_disable_rings(struct ixl_vsi *vsi)
3907 struct ixl_pf *pf = vsi->back;
3908 struct i40e_hw *hw = &pf->hw;
3913 for (int i = 0; i < vsi->num_queues; i++) {
3914 index = vsi->first_queue + i;
3916 i40e_pre_tx_queue_cfg(hw, index, FALSE);
3917 i40e_usec_delay(500);
3919 reg = rd32(hw, I40E_QTX_ENA(index));
3920 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3921 wr32(hw, I40E_QTX_ENA(index), reg);
3922 /* Verify the disable took */
3923 for (int j = 0; j < 10; j++) {
3924 reg = rd32(hw, I40E_QTX_ENA(index));
3925 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3927 i40e_msec_delay(10);
3929 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3930 device_printf(pf->dev, "TX queue %d still enabled!\n",
3935 reg = rd32(hw, I40E_QRX_ENA(index));
3936 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3937 wr32(hw, I40E_QRX_ENA(index), reg);
3938 /* Verify the disable took */
3939 for (int j = 0; j < 10; j++) {
3940 reg = rd32(hw, I40E_QRX_ENA(index));
3941 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3943 i40e_msec_delay(10);
3945 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3946 device_printf(pf->dev, "RX queue %d still enabled!\n",
3956 * ixl_handle_mdd_event
3958 * Called from interrupt handler to identify possibly malicious vfs
3959 * (But also detects events from the PF, as well)
/*
 * Reads the global MDET_TX/MDET_RX registers to decode which function
 * and queue triggered a Malicious Driver Detection event, logs it,
 * clears the cause by writing all-ones, then checks the PF-local
 * MDET_TX/MDET_RX registers for events targeting this PF. Finally the
 * MAL_DETECT cause is re-enabled in PFINT_ICR0_ENA.
 * NOTE(review): the guard that skips the PF-local checks (presumably
 * 'if (!mdd_detected) return;' or similar using mdd_detected /
 * pf_mdd_detected) is not visible in this excerpt.
 */
3961 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3963 struct i40e_hw *hw = &pf->hw;
3964 device_t dev = pf->dev;
3965 bool mdd_detected = false;
3966 bool pf_mdd_detected = false;
3969 /* find what triggered the MDD event */
3970 reg = rd32(hw, I40E_GL_MDET_TX);
3971 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3972 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3973 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3974 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3975 I40E_GL_MDET_TX_EVENT_SHIFT;
3976 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3977 I40E_GL_MDET_TX_QUEUE_SHIFT;
3979 "Malicious Driver Detection event 0x%02x"
3980 " on TX queue %d pf number 0x%02x\n",
3981 event, queue, pf_num);
/* Write all-ones to clear the latched TX MDD cause */
3982 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3983 mdd_detected = true;
3985 reg = rd32(hw, I40E_GL_MDET_RX);
3986 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3987 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3988 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3989 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3990 I40E_GL_MDET_RX_EVENT_SHIFT;
3991 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3992 I40E_GL_MDET_RX_QUEUE_SHIFT;
3994 "Malicious Driver Detection event 0x%02x"
3995 " on RX queue %d of function 0x%02x\n",
3996 event, queue, func);
3997 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3998 mdd_detected = true;
4002 reg = rd32(hw, I40E_PF_MDET_TX);
4003 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4004 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4006 "MDD TX event is for this function 0x%08x",
4008 pf_mdd_detected = true;
4010 reg = rd32(hw, I40E_PF_MDET_RX);
4011 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4012 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4014 "MDD RX event is for this function 0x%08x",
4016 pf_mdd_detected = true;
4020 /* re-enable mdd interrupt cause */
4021 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4022 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4023 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_enable_intr -- enable interrupts for the VSI.
 *
 * MSI-X mode: enable the adminq vector plus one vector per queue.
 * Otherwise: enable the single legacy/MSI interrupt.
 */
4028 ixl_enable_intr(struct ixl_vsi *vsi)
4030 struct i40e_hw *hw = vsi->hw;
4031 struct ixl_queue *que = vsi->queues;
4033 if (ixl_enable_msix) {
4034 ixl_enable_adminq(hw);
4035 for (int i = 0; i < vsi->num_queues; i++, que++)
4036 ixl_enable_queue(hw, que->me);
4038 ixl_enable_legacy(hw);
/*
 * ixl_disable_rings_intr -- mask the per-queue MSI-X interrupts of the
 * VSI. The adminq vector is left alone (see ixl_disable_intr).
 */
4042 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4044 struct i40e_hw *hw = vsi->hw;
4045 struct ixl_queue *que = vsi->queues;
4047 for (int i = 0; i < vsi->num_queues; i++, que++)
4048 ixl_disable_queue(hw, que->me);
/*
 * ixl_disable_intr -- mask the admin interrupt: the adminq vector in
 * MSI-X mode, otherwise the legacy/MSI interrupt.
 */
4052 ixl_disable_intr(struct ixl_vsi *vsi)
4054 struct i40e_hw *hw = vsi->hw;
4056 if (ixl_enable_msix)
4057 ixl_disable_adminq(hw);
4059 ixl_disable_legacy(hw);
/*
 * ixl_enable_adminq -- unmask the adminq MSI-X vector: set INTENA and
 * CLEARPBA in PFINT_DYN_CTL0 with the no-ITR index selected.
 */
4063 ixl_enable_adminq(struct i40e_hw *hw)
4067 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4068 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4069 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4070 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_disable_adminq -- mask the adminq vector by writing
 * PFINT_DYN_CTL0 with INTENA clear (only the ITR index set).
 */
4076 ixl_disable_adminq(struct i40e_hw *hw)
4080 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4081 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_enable_queue -- unmask MSI-X vector 'id' (a queue interrupt):
 * set INTENA and CLEARPBA in PFINT_DYN_CTLN(id), no-ITR index.
 */
4087 ixl_enable_queue(struct i40e_hw *hw, int id)
4091 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4092 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4093 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4094 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * ixl_disable_queue -- mask MSI-X vector 'id' by writing
 * PFINT_DYN_CTLN(id) with INTENA clear (only the ITR index set).
 */
4098 ixl_disable_queue(struct i40e_hw *hw, int id)
4102 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4103 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * ixl_enable_legacy -- unmask the legacy/MSI interrupt; identical
 * register write to ixl_enable_adminq (PFINT_DYN_CTL0 serves both the
 * adminq MSI-X vector and the legacy interrupt).
 */
4109 ixl_enable_legacy(struct i40e_hw *hw)
4112 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4113 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4114 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4115 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_disable_legacy -- mask the legacy/MSI interrupt; identical
 * register write to ixl_disable_adminq.
 */
4119 ixl_disable_legacy(struct i40e_hw *hw)
4123 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4124 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * ixl_update_stats_counters -- refresh all PF-level (port) hardware
 * statistics, then the VSI stats, then per-VF eth stats.
 *
 * Each ixl_stat_update32/48 call reads a GLPRT_* register pair and
 * accumulates the delta into pf->stats ('nsd'), using pf->stats_offsets
 * ('osd') as the baseline captured on the first pass
 * (stat_offsets_loaded). The hardware counters are not cleared by a PF
 * reset, hence the offset scheme (see ixl_stat_update48 comment).
 */
4130 ixl_update_stats_counters(struct ixl_pf *pf)
4132 struct i40e_hw *hw = &pf->hw;
4133 struct ixl_vsi *vsi = &pf->vsi;
4136 struct i40e_hw_port_stats *nsd = &pf->stats;
4137 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4139 /* Update hw stats */
4140 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4141 pf->stat_offsets_loaded,
4142 &osd->crc_errors, &nsd->crc_errors);
4143 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4144 pf->stat_offsets_loaded,
4145 &osd->illegal_bytes, &nsd->illegal_bytes);
4146 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4147 I40E_GLPRT_GORCL(hw->port),
4148 pf->stat_offsets_loaded,
4149 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4150 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4151 I40E_GLPRT_GOTCL(hw->port),
4152 pf->stat_offsets_loaded,
4153 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4154 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4155 pf->stat_offsets_loaded,
4156 &osd->eth.rx_discards,
4157 &nsd->eth.rx_discards);
4158 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4159 I40E_GLPRT_UPRCL(hw->port),
4160 pf->stat_offsets_loaded,
4161 &osd->eth.rx_unicast,
4162 &nsd->eth.rx_unicast);
4163 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4164 I40E_GLPRT_UPTCL(hw->port),
4165 pf->stat_offsets_loaded,
4166 &osd->eth.tx_unicast,
4167 &nsd->eth.tx_unicast);
4168 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4169 I40E_GLPRT_MPRCL(hw->port),
4170 pf->stat_offsets_loaded,
4171 &osd->eth.rx_multicast,
4172 &nsd->eth.rx_multicast);
4173 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4174 I40E_GLPRT_MPTCL(hw->port),
4175 pf->stat_offsets_loaded,
4176 &osd->eth.tx_multicast,
4177 &nsd->eth.tx_multicast);
4178 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4179 I40E_GLPRT_BPRCL(hw->port),
4180 pf->stat_offsets_loaded,
4181 &osd->eth.rx_broadcast,
4182 &nsd->eth.rx_broadcast);
4183 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4184 I40E_GLPRT_BPTCL(hw->port),
4185 pf->stat_offsets_loaded,
4186 &osd->eth.tx_broadcast,
4187 &nsd->eth.tx_broadcast);
4189 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4190 pf->stat_offsets_loaded,
4191 &osd->tx_dropped_link_down,
4192 &nsd->tx_dropped_link_down);
4193 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4194 pf->stat_offsets_loaded,
4195 &osd->mac_local_faults,
4196 &nsd->mac_local_faults);
4197 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4198 pf->stat_offsets_loaded,
4199 &osd->mac_remote_faults,
4200 &nsd->mac_remote_faults);
4201 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4202 pf->stat_offsets_loaded,
4203 &osd->rx_length_errors,
4204 &nsd->rx_length_errors);
4206 /* Flow control (LFC) stats */
4207 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4208 pf->stat_offsets_loaded,
4209 &osd->link_xon_rx, &nsd->link_xon_rx);
4210 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4211 pf->stat_offsets_loaded,
4212 &osd->link_xon_tx, &nsd->link_xon_tx);
4213 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4214 pf->stat_offsets_loaded,
4215 &osd->link_xoff_rx, &nsd->link_xoff_rx);
4216 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4217 pf->stat_offsets_loaded,
4218 &osd->link_xoff_tx, &nsd->link_xoff_tx);
4220 /* Packet size stats rx */
4221 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4222 I40E_GLPRT_PRC64L(hw->port),
4223 pf->stat_offsets_loaded,
4224 &osd->rx_size_64, &nsd->rx_size_64);
4225 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4226 I40E_GLPRT_PRC127L(hw->port),
4227 pf->stat_offsets_loaded,
4228 &osd->rx_size_127, &nsd->rx_size_127);
4229 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4230 I40E_GLPRT_PRC255L(hw->port),
4231 pf->stat_offsets_loaded,
4232 &osd->rx_size_255, &nsd->rx_size_255);
4233 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4234 I40E_GLPRT_PRC511L(hw->port),
4235 pf->stat_offsets_loaded,
4236 &osd->rx_size_511, &nsd->rx_size_511);
4237 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4238 I40E_GLPRT_PRC1023L(hw->port),
4239 pf->stat_offsets_loaded,
4240 &osd->rx_size_1023, &nsd->rx_size_1023);
4241 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4242 I40E_GLPRT_PRC1522L(hw->port),
4243 pf->stat_offsets_loaded,
4244 &osd->rx_size_1522, &nsd->rx_size_1522);
4245 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4246 I40E_GLPRT_PRC9522L(hw->port),
4247 pf->stat_offsets_loaded,
4248 &osd->rx_size_big, &nsd->rx_size_big);
4250 /* Packet size stats tx */
4251 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4252 I40E_GLPRT_PTC64L(hw->port),
4253 pf->stat_offsets_loaded,
4254 &osd->tx_size_64, &nsd->tx_size_64);
4255 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4256 I40E_GLPRT_PTC127L(hw->port),
4257 pf->stat_offsets_loaded,
4258 &osd->tx_size_127, &nsd->tx_size_127);
4259 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4260 I40E_GLPRT_PTC255L(hw->port),
4261 pf->stat_offsets_loaded,
4262 &osd->tx_size_255, &nsd->tx_size_255);
4263 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4264 I40E_GLPRT_PTC511L(hw->port),
4265 pf->stat_offsets_loaded,
4266 &osd->tx_size_511, &nsd->tx_size_511);
4267 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4268 I40E_GLPRT_PTC1023L(hw->port),
4269 pf->stat_offsets_loaded,
4270 &osd->tx_size_1023, &nsd->tx_size_1023);
4271 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4272 I40E_GLPRT_PTC1522L(hw->port),
4273 pf->stat_offsets_loaded,
4274 &osd->tx_size_1522, &nsd->tx_size_1522);
4275 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4276 I40E_GLPRT_PTC9522L(hw->port),
4277 pf->stat_offsets_loaded,
4278 &osd->tx_size_big, &nsd->tx_size_big);
4280 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4281 pf->stat_offsets_loaded,
4282 &osd->rx_undersize, &nsd->rx_undersize);
4283 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4284 pf->stat_offsets_loaded,
4285 &osd->rx_fragments, &nsd->rx_fragments);
4286 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4287 pf->stat_offsets_loaded,
4288 &osd->rx_oversize, &nsd->rx_oversize);
4289 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4290 pf->stat_offsets_loaded,
4291 &osd->rx_jabber, &nsd->rx_jabber);
/* From here on, deltas are computed against the baseline just taken */
4292 pf->stat_offsets_loaded = true;
4295 /* Update vsi stats */
4296 ixl_update_vsi_stats(vsi);
4298 for (int i = 0; i < pf->num_vfs; i++) {
4300 if (vf->vf_flags & VF_FLAG_ENABLED)
4301 ixl_update_eth_stats(&pf->vfs[i].vsi);
4306 ** Tasklet handler for MSIX Adminq interrupts
4307 ** - do outside interrupt since it might sleep
/*
 * ixl_do_adminq -- taskqueue handler draining the Admin Receive Queue.
 *
 * context: the ixl_pf softc; pending: taskqueue arg (unused here).
 *
 * Allocates a temporary IXL_AQ_BUF_SZ event buffer, loops on
 * i40e_clean_arq_element() up to IXL_ADM_LIMIT events, dispatching on
 * the AQ opcode (link status, VF mailbox message, LAN queue overflow;
 * unknown opcodes are logged). Afterwards re-arms the adminq cause in
 * PFINT_ICR0_ENA, frees the buffer, and either re-enqueues itself
 * (more work pending) or re-enables interrupts.
 * NOTE(review): the loop framing, locking, and the condition that
 * selects between taskqueue_enqueue() and ixl_enable_intr() are only
 * partially visible in this excerpt.
 */
4310 ixl_do_adminq(void *context, int pending)
4312 struct ixl_pf *pf = context;
4313 struct i40e_hw *hw = &pf->hw;
4314 struct ixl_vsi *vsi = &pf->vsi;
4315 struct i40e_arq_event_info event;
4320 event.buf_len = IXL_AQ_BUF_SZ;
4321 event.msg_buf = malloc(event.buf_len,
4322 M_DEVBUF, M_NOWAIT | M_ZERO);
4323 if (!event.msg_buf) {
4324 printf("Unable to allocate adminq memory\n");
4329 /* clean and process any events */
4331 ret = i40e_clean_arq_element(hw, &event, &result);
4334 opcode = LE16_TO_CPU(event.desc.opcode);
4336 case i40e_aqc_opc_get_link_status:
4337 ixl_link_event(pf, &event);
4338 ixl_update_link_status(pf);
4340 case i40e_aqc_opc_send_msg_to_pf:
4342 ixl_handle_vf_msg(pf, &event);
4345 case i40e_aqc_opc_event_lan_overflow:
4349 printf("AdminQ unknown event %x\n", opcode);
4354 } while (result && (loop++ < IXL_ADM_LIMIT));
/* Re-arm the adminq interrupt cause before leaving */
4356 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4357 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4358 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4359 free(event.msg_buf, M_DEVBUF);
4362 * If there are still messages to process, reschedule ourselves.
4363 * Otherwise, re-enable our interrupt and go to sleep.
4366 taskqueue_enqueue(pf->tq, &pf->adminq);
4368 ixl_enable_intr(vsi);
#ifdef IXL_DEBUG_SYSCTL
/*
 * ixl_debug_info -- sysctl handler: any write to the node triggers a
 * dump of driver debug state via ixl_print_debug_info(). Reads are a
 * no-op (req->newptr == NULL returns early).
 */
4375 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4378 int error, input = 0;
4380 error = sysctl_handle_int(oidp, &input, 0, req);
4382 if (error || !req->newptr)
4386 pf = (struct ixl_pf *)arg1;
4387 ixl_print_debug_info(pf);
/*
 * ixl_print_debug_info -- dump queue-0 and port counters to the
 * console for debugging: IRQ counts, RX/TX ring state, and a set of
 * raw GLV_*/GLPRT_* registers.
 * NOTE(review): the hard-coded 0xc passed to the I40E_GLV_* macros
 * looks like a fixed stat index for the debug VSI -- confirm it
 * matches vsi->info.stat_counter_idx on the target setup.
 */
4394 ixl_print_debug_info(struct ixl_pf *pf)
4396 struct i40e_hw *hw = &pf->hw;
4397 struct ixl_vsi *vsi = &pf->vsi;
4398 struct ixl_queue *que = vsi->queues;
4399 struct rx_ring *rxr = &que->rxr;
4400 struct tx_ring *txr = &que->txr;
4404 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4405 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4406 printf("RX next check = %x\n", rxr->next_check);
4407 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4408 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4409 printf("TX desc avail = %x\n", txr->avail);
4411 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4412 printf("RX Bytes = %x\n", reg);
4413 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4414 printf("Port RX Bytes = %x\n", reg);
4415 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4416 printf("RX discard = %x\n", reg);
4417 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4418 printf("Port RX discard = %x\n", reg);
4420 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4421 printf("TX errors = %x\n", reg);
4422 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4423 printf("TX Bytes = %x\n", reg);
4425 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4426 printf("RX undersize = %x\n", reg);
4427 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4428 printf("RX fragments = %x\n", reg);
4429 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4430 printf("RX oversize = %x\n", reg);
4431 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4432 printf("RX length error = %x\n", reg);
4433 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4434 printf("mac remote fault = %x\n", reg);
4435 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4436 printf("mac local fault = %x\n", reg);
4441 * Update VSI-specific ethernet statistics counters.
/*
 * Reads the per-VSI GLV_* counter registers (selected by the VSI's
 * stat_counter_idx) and accumulates deltas into vsi->eth_stats, using
 * vsi->eth_stats_offsets as the first-read baseline -- same offset
 * scheme as the port stats (see ixl_stat_update48).
 */
4443 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4445 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4446 struct i40e_hw *hw = &pf->hw;
4447 struct i40e_eth_stats *es;
4448 struct i40e_eth_stats *oes;
4449 struct i40e_hw_port_stats *nsd;
4450 u16 stat_idx = vsi->info.stat_counter_idx;
4452 es = &vsi->eth_stats;
4453 oes = &vsi->eth_stats_offsets;
4456 /* Gather up the stats that the hw collects */
4457 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4458 vsi->stat_offsets_loaded,
4459 &oes->tx_errors, &es->tx_errors);
4460 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4461 vsi->stat_offsets_loaded,
4462 &oes->rx_discards, &es->rx_discards);
4464 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4465 I40E_GLV_GORCL(stat_idx),
4466 vsi->stat_offsets_loaded,
4467 &oes->rx_bytes, &es->rx_bytes);
4468 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4469 I40E_GLV_UPRCL(stat_idx),
4470 vsi->stat_offsets_loaded,
4471 &oes->rx_unicast, &es->rx_unicast);
4472 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4473 I40E_GLV_MPRCL(stat_idx),
4474 vsi->stat_offsets_loaded,
4475 &oes->rx_multicast, &es->rx_multicast);
4476 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4477 I40E_GLV_BPRCL(stat_idx),
4478 vsi->stat_offsets_loaded,
4479 &oes->rx_broadcast, &es->rx_broadcast);
4481 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4482 I40E_GLV_GOTCL(stat_idx),
4483 vsi->stat_offsets_loaded,
4484 &oes->tx_bytes, &es->tx_bytes);
4485 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4486 I40E_GLV_UPTCL(stat_idx),
4487 vsi->stat_offsets_loaded,
4488 &oes->tx_unicast, &es->tx_unicast);
4489 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4490 I40E_GLV_MPTCL(stat_idx),
4491 vsi->stat_offsets_loaded,
4492 &oes->tx_multicast, &es->tx_multicast);
4493 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4494 I40E_GLV_BPTCL(stat_idx),
4495 vsi->stat_offsets_loaded,
4496 &oes->tx_broadcast, &es->tx_broadcast);
/* Baseline captured; later calls compute deltas */
4497 vsi->stat_offsets_loaded = true;
/*
 * ixl_update_vsi_stats -- refresh VSI eth stats, then fold HW counters
 * plus per-ring software drop counts into the ifnet statistics via the
 * IXL_SET_* accessor macros. tx_discards aggregates the HW tx_discards,
 * link-down drops, and each TX ring's buf_ring drop counter.
 * NOTE(review): the 'nsd' initialization line (pointing at the PF port
 * stats) is not visible in this excerpt.
 */
4501 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4505 struct i40e_eth_stats *es;
4508 struct i40e_hw_port_stats *nsd;
4512 es = &vsi->eth_stats;
4515 ixl_update_eth_stats(vsi);
4517 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4518 for (int i = 0; i < vsi->num_queues; i++)
4519 tx_discards += vsi->queues[i].txr.br->br_drops;
4521 /* Update ifnet stats */
4522 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4525 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4528 IXL_SET_IBYTES(vsi, es->rx_bytes);
4529 IXL_SET_OBYTES(vsi, es->tx_bytes);
4530 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4531 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4533 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4534 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4536 IXL_SET_OERRORS(vsi, es->tx_errors);
4537 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4538 IXL_SET_OQDROPS(vsi, tx_discards);
4539 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4540 IXL_SET_COLLISIONS(vsi, 0);
4544 * Reset all of the stats for the given pf
/*
 * Zeroes both the accumulated port stats and their offset baselines,
 * and clears stat_offsets_loaded so the next update re-captures the
 * baseline from hardware.
 */
4546 void ixl_pf_reset_stats(struct ixl_pf *pf)
4548 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4549 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4550 pf->stat_offsets_loaded = false;
4554 * Resets all stats of the given vsi
/*
 * VSI-level analogue of ixl_pf_reset_stats(): zero the eth stats and
 * their baselines, and force a baseline re-capture on next update.
 */
4556 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4558 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4559 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4560 vsi->stat_offsets_loaded = false;
4564 * Read and update a 48 bit stat from the hw
4566 * Since the device stats are not reset at PFReset, they likely will not
4567 * be zeroed when the driver starts. We'll save the first values read
4568 * and use them as offsets to be subtracted from the raw values in order
4569 * to report stats that count from zero.
/*
 * hireg/loreg: high/low 32-bit register pair holding the 48-bit count.
 * offset_loaded: false on the first read; the caller stores the raw
 *   value in *offset as the baseline (that store is in lines not
 *   visible in this excerpt).
 * offset/stat: baseline in, delta out.
 *
 * On amd64 FreeBSD >= 10 a single 8-byte rd64 is used; otherwise two
 * rd32 reads (pre-10 busdma lacks 8-byte bus accesses). If the raw
 * value has wrapped below the baseline, 2^48 is added before
 * subtracting, and the result is masked to 48 bits.
 */
4572 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4573 bool offset_loaded, u64 *offset, u64 *stat)
4577 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4578 new_data = rd64(hw, loreg);
4581 * Use two rd32's instead of one rd64; FreeBSD versions before
4582 * 10 don't support 8 byte bus reads/writes.
4584 new_data = rd32(hw, loreg);
4585 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4590 if (new_data >= *offset)
4591 *stat = new_data - *offset;
4593 *stat = (new_data + ((u64)1 << 48)) - *offset;
4594 *stat &= 0xFFFFFFFFFFFFULL;
4598 * Read and update a 32 bit stat from the hw
/*
 * 32-bit analogue of ixl_stat_update48: read one register, subtract
 * the first-read baseline, adding 2^32 first when the counter has
 * wrapped below the baseline.
 */
4601 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4602 bool offset_loaded, u64 *offset, u64 *stat)
4606 new_data = rd32(hw, reg);
4609 if (new_data >= *offset)
4610 *stat = (u32)(new_data - *offset);
4612 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4616 ** Set flow control using sysctl:
/*
 * ixl_set_flowcntl -- sysctl handler writing pf->fc (valid range 0-3,
 * rejected otherwise) and pushing it to firmware via i40e_set_fc().
 * Changing the mode is explicitly refused on 40GBase-CR4/CR4_CU media
 * (see inline comment). AQ failures are logged with both the driver
 * status and the AQ fc error code.
 * NOTE(review): the returns after validation failures and the final
 * return are not visible in this excerpt.
 */
4623 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4626 * TODO: ensure flow control is disabled if
4627 * priority flow control is enabled
4629 * TODO: ensure tx CRC by hardware should be enabled
4630 * if tx flow control is enabled.
4632 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4633 struct i40e_hw *hw = &pf->hw;
4634 device_t dev = pf->dev;
4636 enum i40e_status_code aq_error = 0;
4640 error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4641 if ((error) || (req->newptr == NULL))
4643 if (pf->fc < 0 || pf->fc > 3) {
4645 "Invalid fc mode; valid modes are 0 through 3\n");
4650 ** Changing flow control mode currently does not work on
4653 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4654 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4655 device_printf(dev, "Changing flow control mode unsupported"
4656 " on 40GBase-CR4 media.\n");
4660 /* Set fc ability for port */
4661 hw->fc.requested_mode = pf->fc;
4662 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4665 "%s: Error setting new fc mode %d; fc_err %#x\n",
4666 __func__, aq_error, fc_aq_err);
/*
 * ixl_current_speed -- read-only sysctl reporting the current link
 * speed as a string. Refreshes link state first, maps the HW
 * link_speed enum to an index into a 'speeds' string table (declared
 * in lines not visible here), and hands the string to
 * sysctl_handle_string().
 */
4674 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4676 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4677 struct i40e_hw *hw = &pf->hw;
4678 int error = 0, index = 0;
4689 ixl_update_link_status(pf);
4691 switch (hw->phy.link_info.link_speed) {
4692 case I40E_LINK_SPEED_100MB:
4695 case I40E_LINK_SPEED_1GB:
4698 case I40E_LINK_SPEED_10GB:
4701 case I40E_LINK_SPEED_40GB:
4704 case I40E_LINK_SPEED_20GB:
4707 case I40E_LINK_SPEED_UNKNOWN:
4713 error = sysctl_handle_string(oidp, speeds[index],
4714 strlen(speeds[index]), req);
/*
 * ixl_set_advertised_speeds -- program the PHY's advertised link
 * speeds.
 *
 * speeds: bitmask per ixl_set_advertise's scheme (0x1=100M, 0x2=1G,
 *         0x4=10G, 0x8=20G), translated here into I40E_LINK_SPEED_*
 *         bits for the AQ set_phy_config command.
 *
 * Fetches current PHY abilities, builds a new config preserving
 * abilities/EEE/EEER/low-power settings (adding ATOMIC_LINK), issues
 * the AQ command, then forces a reinit via ixl_init_locked() to make
 * the change take effect (see inline comment).
 * NOTE(review): the bitmask tests guarding each link_speed |= line are
 * not visible in this excerpt.
 */
4719 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4721 struct i40e_hw *hw = &pf->hw;
4722 device_t dev = pf->dev;
4723 struct i40e_aq_get_phy_abilities_resp abilities;
4724 struct i40e_aq_set_phy_config config;
4725 enum i40e_status_code aq_error = 0;
4727 /* Get current capability information */
4728 aq_error = i40e_aq_get_phy_capabilities(hw,
4729 FALSE, FALSE, &abilities, NULL);
4732 "%s: Error getting phy capabilities %d,"
4733 " aq error: %d\n", __func__, aq_error,
4734 hw->aq.asq_last_status);
4738 /* Prepare new config */
4739 bzero(&config, sizeof(config));
4740 config.phy_type = abilities.phy_type;
4741 config.abilities = abilities.abilities
4742 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4743 config.eee_capability = abilities.eee_capability;
4744 config.eeer = abilities.eeer_val;
4745 config.low_power_ctrl = abilities.d3_lpan;
4746 /* Translate into aq cmd link_speed */
4748 config.link_speed |= I40E_LINK_SPEED_20GB;
4750 config.link_speed |= I40E_LINK_SPEED_10GB;
4752 config.link_speed |= I40E_LINK_SPEED_1GB;
4754 config.link_speed |= I40E_LINK_SPEED_100MB;
4756 /* Do aq command & restart link */
4757 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4760 "%s: Error setting new phy config %d,"
4761 " aq error: %d\n", __func__, aq_error,
4762 hw->aq.asq_last_status);
4767 ** This seems a bit heavy handed, but we
4768 ** need to get a reinit on some devices
4772 ixl_init_locked(pf);
4779 ** Control link advertise speed:
4781 ** 0x1 - advertise 100 Mb
4782 ** 0x2 - advertise 1G
4783 ** 0x4 - advertise 10G
4784 ** 0x8 - advertise 20G
4786 ** Does not work on 40G devices.
/*
 * ixl_set_advertise -- sysctl handler for the advertised-speed mask.
 *
 * Rejects the request on 40G devices (FW limitation), validates the
 * mask is within 0x1-0xE, then applies per-device restrictions:
 * 10GBase-T parts cannot advertise 20G, 20G-KR2 parts cannot advertise
 * 100M, and all other parts are limited to the 1G/10G bits (0x6).
 * On success the new mask is stored and applied via
 * ixl_set_advertised_speeds(), then link status is refreshed.
 * NOTE(review): the early returns, break statements, and the error
 * check after ixl_set_advertised_speeds() are not visible here.
 */
4789 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4791 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4792 struct i40e_hw *hw = &pf->hw;
4793 device_t dev = pf->dev;
4794 int requested_ls = 0;
4798 ** FW doesn't support changing advertised speed
4799 ** for 40G devices; speed is always 40G.
4801 if (i40e_is_40G_device(hw->device_id))
4804 /* Read in new mode */
4805 requested_ls = pf->advertised_speed;
4806 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4807 if ((error) || (req->newptr == NULL))
4809 /* Check for sane value */
4810 if (requested_ls < 0x1 || requested_ls > 0xE) {
4811 device_printf(dev, "Invalid advertised speed; "
4812 "valid modes are 0x1 through 0xE\n");
4815 /* Then check for validity based on adapter type */
4816 switch (hw->device_id) {
4817 case I40E_DEV_ID_10G_BASE_T:
4818 if (requested_ls & 0x8) {
4820 "20Gbs speed not supported on this device.\n");
4824 case I40E_DEV_ID_20G_KR2:
4825 if (requested_ls & 0x1) {
4827 "100Mbs speed not supported on this device.\n");
4832 if (requested_ls & ~0x6) {
4834 "Only 1/10Gbs speeds are supported on this device.\n");
4840 /* Exit if no change */
4841 if (pf->advertised_speed == requested_ls)
4844 error = ixl_set_advertised_speeds(pf, requested_ls);
4848 pf->advertised_speed = requested_ls;
4849 ixl_update_link_status(pf);
4854 ** Get the width and transaction speed of
4855 ** the bus this adapter is plugged into.
/*
 * ixl_get_bus_info -- decode PCIe link width and speed from the Link
 * Status register in the PCIe capability, store them in hw->bus, print
 * them, and warn when the slot (below x8 or below Gen3/8GT) may
 * bottleneck the adapter.
 * NOTE(review): the 'offset'/'link' declarations and return value are
 * not visible in this excerpt; pci_find_cap()'s result is apparently
 * not checked before use -- confirm against the full source.
 */
4858 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4864 /* Get the PCI Express Capabilities offset */
4865 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4867 /* ...and read the Link Status Register */
4868 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4870 switch (link & I40E_PCI_LINK_WIDTH) {
4871 case I40E_PCI_LINK_WIDTH_1:
4872 hw->bus.width = i40e_bus_width_pcie_x1;
4874 case I40E_PCI_LINK_WIDTH_2:
4875 hw->bus.width = i40e_bus_width_pcie_x2;
4877 case I40E_PCI_LINK_WIDTH_4:
4878 hw->bus.width = i40e_bus_width_pcie_x4;
4880 case I40E_PCI_LINK_WIDTH_8:
4881 hw->bus.width = i40e_bus_width_pcie_x8;
4884 hw->bus.width = i40e_bus_width_unknown;
4888 switch (link & I40E_PCI_LINK_SPEED) {
4889 case I40E_PCI_LINK_SPEED_2500:
4890 hw->bus.speed = i40e_bus_speed_2500;
4892 case I40E_PCI_LINK_SPEED_5000:
4893 hw->bus.speed = i40e_bus_speed_5000;
4895 case I40E_PCI_LINK_SPEED_8000:
4896 hw->bus.speed = i40e_bus_speed_8000;
4899 hw->bus.speed = i40e_bus_speed_unknown;
4904 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4905 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4906 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4907 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4908 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4909 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4910 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4913 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4914 (hw->bus.speed < i40e_bus_speed_8000)) {
4915 device_printf(dev, "PCI-Express bandwidth available"
4916 " for this device\n may be insufficient for"
4917 " optimal performance.\n");
4918 device_printf(dev, "For expected performance a x8 "
4919 "PCIE Gen3 slot is required.\n");
/* Sysctl handler: reports firmware, AQ API, NVM and eetrack versions as a
** single formatted string (read-only). */
4926 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4928 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4929 struct i40e_hw *hw = &pf->hw;
4932 snprintf(buf, sizeof(buf),
4933 "f%d.%d a%d.%d n%02x.%02x e%08x",
4934 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4935 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version is packed hi/lo into one word; unpack for display. */
4936 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4937 IXL_NVM_VERSION_HI_SHIFT,
4938 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4939 IXL_NVM_VERSION_LO_SHIFT,
4941 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4945 #ifdef IXL_DEBUG_SYSCTL
/* Debug sysctl: queries current link status from firmware via the admin
** queue and formats the raw fields for display. */
4947 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4949 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4950 struct i40e_hw *hw = &pf->hw;
4951 struct i40e_link_status link_status;
4954 enum i40e_status_code aq_error = 0;
4956 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4958 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4963 "PHY Type : %#04x\n"
4965 "Link info: %#04x\n"
4968 link_status.phy_type, link_status.link_speed,
4969 link_status.link_info, link_status.an_info,
4970 link_status.ext_info);
4972 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/* Debug sysctl: fetches PHY capabilities from firmware and dumps the raw
** ability/EEE/EEER fields as a string. */
4976 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4978 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4979 struct i40e_hw *hw = &pf->hw;
4981 enum i40e_status_code aq_error = 0;
4983 struct i40e_aq_get_phy_abilities_resp abilities;
4985 aq_error = i40e_aq_get_phy_capabilities(hw,
4986 TRUE, FALSE, &abilities, NULL);
4988 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4993 "PHY Type : %#010x\n"
4995 "Abilities: %#04x\n"
4997 "EEER reg : %#010x\n"
4999 abilities.phy_type, abilities.link_speed,
5000 abilities.abilities, abilities.eee_capability,
5001 abilities.eeer_val, abilities.d3_lpan);
5003 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/* Debug sysctl: lists the software MAC filter table of the PF VSI, one
** "MAC, vlan, flags" entry per line. */
5007 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5009 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5010 struct ixl_vsi *vsi = &pf->vsi;
5011 struct ixl_mac_filter *f;
5016 int ftl_counter = 0;
/* First pass: count entries (list may be empty). */
5020 SLIST_FOREACH(f, &vsi->ftl, next) {
5025 sysctl_handle_string(oidp, "(none)", 6, req);
/* Buffer sized for all entries plus separators and NUL. */
5029 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
/* NOTE(review): M_NOWAIT allocation -- a NULL check is not visible in
** this view; confirm one exists before the sprintf below. */
5030 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5032 sprintf(buf_i++, "\n");
5033 SLIST_FOREACH(f, &vsi->ftl, next) {
5035 MAC_FORMAT ", vlan %4d, flags %#06x",
5036 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5038 /* don't print '\n' for last entry */
5039 if (++ftl_counter != ftl_len) {
5040 sprintf(buf_i, "\n");
5045 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5047 printf("sysctl error: %d\n", error);
5048 free(buf, M_DEVBUF);
5052 #define IXL_SW_RES_SIZE 0x14
/* qsort(3) comparator: orders switch resource-allocation entries by
** ascending resource_type. */
5054 ixl_res_alloc_cmp(const void *a, const void *b)
5056 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5057 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5058 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5060 return ((int)one->resource_type - (int)two->resource_type);
/* Debug sysctl: queries the hardware switch resource allocation table,
** sorts it by type, and renders it as an aligned text table. */
5064 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5066 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5067 struct i40e_hw *hw = &pf->hw;
5068 device_t dev = pf->dev;
5073 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5075 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5077 device_printf(dev, "Could not allocate sbuf for output.\n");
5081 bzero(resp, sizeof(resp));
5082 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5088 "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5089 __func__, error, hw->aq.asq_last_status);
5094 /* Sort entries by type for display */
5095 qsort(resp, num_entries,
5096 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5097 &ixl_res_alloc_cmp);
5099 sbuf_cat(buf, "\n");
5100 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5102 "Type | Guaranteed | Total | Used | Un-allocated\n"
5103 " | (this) | (all) | (this) | (all) \n");
5104 for (int i = 0; i < num_entries; i++) {
5106 "%#4x | %10d %5d %6d %12d",
5107 resp[i].resource_type,
5111 resp[i].total_unalloced);
/* No trailing newline after the last row. */
5112 if (i < num_entries - 1)
5113 sbuf_cat(buf, "\n");
5116 error = sbuf_finish(buf);
5118 device_printf(dev, "Error finishing sbuf: %d\n", error);
5123 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5125 device_printf(dev, "sysctl error: %d\n", error);
5132 ** Caller must init and delete sbuf; this function will clear and
5133 ** finish it for caller.
/* Translates a switch element SEID into a human-readable name ("PF 0",
** "VSI 3", ...) using the documented SEID range partitioning; returns the
** sbuf's data pointer for immediate printing by the caller. */
5136 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
/* SEID 0 means "Network" when viewed as an uplink, "Host" otherwise. */
5140 if (seid == 0 && uplink)
5141 sbuf_cat(s, "Network");
5143 sbuf_cat(s, "Host");
5147 sbuf_printf(s, "MAC %d", seid - 2);
5148 else if (seid <= 15)
5149 sbuf_cat(s, "Reserved");
5150 else if (seid <= 31)
5151 sbuf_printf(s, "PF %d", seid - 16);
5152 else if (seid <= 159)
5153 sbuf_printf(s, "VF %d", seid - 32);
5154 else if (seid <= 287)
5155 sbuf_cat(s, "Reserved");
5156 else if (seid <= 511)
5157 sbuf_cat(s, "Other"); // for other structures
5158 else if (seid <= 895)
5159 sbuf_printf(s, "VSI %d", seid - 512);
5160 else if (seid <= 1023)
5161 sbuf_printf(s, "Reserved");
5163 sbuf_cat(s, "Invalid");
5166 return sbuf_data(s);
/* Debug sysctl: fetches the firmware switch configuration and prints one
** row per element (SEID, name, uplink, downlink, connection type). */
5170 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5172 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5173 struct i40e_hw *hw = &pf->hw;
5174 device_t dev = pf->dev;
5178 u8 aq_buf[I40E_AQ_LARGE_BUF];
5181 struct i40e_aqc_get_switch_config_resp *sw_config;
5182 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5184 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5186 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5190 error = i40e_aq_get_switch_config(hw, sw_config,
5191 sizeof(aq_buf), &next, NULL);
5194 "%s: aq_get_switch_config() error %d, aq error %d\n",
5195 __func__, error, hw->aq.asq_last_status);
/* Scratch sbuf reused for each element-name lookup below. */
5200 nmbuf = sbuf_new_auto();
5202 device_printf(dev, "Could not allocate sbuf for name output.\n");
5206 sbuf_cat(buf, "\n");
5207 // Assuming <= 255 elements in switch
5208 sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5210 ** Revision -- all elements are revision 1 for now
5213 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5214 " | | | (uplink)\n");
5215 for (int i = 0; i < sw_config->header.num_reported; i++) {
5216 // "%4d (%8s) | %8s %8s %#8x",
5217 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5219 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5220 sw_config->element[i].seid, false));
5221 sbuf_cat(buf, " | ");
5222 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5223 sw_config->element[i].uplink_seid, true));
5225 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5226 sw_config->element[i].downlink_seid, false));
5228 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* No trailing newline after the last row. */
5229 if (i < sw_config->header.num_reported - 1)
5230 sbuf_cat(buf, "\n");
5234 error = sbuf_finish(buf);
5236 device_printf(dev, "Error finishing sbuf: %d\n", error);
5241 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5243 device_printf(dev, "sysctl error: %d\n", error);
5248 #endif /* IXL_DEBUG_SYSCTL */
/* Allocates and configures a hardware VSI for one VF: builds the VSI
** context (switch, security, VLAN, queue-map sections), adds the VSI via
** the admin queue, reads its parameters back, and disables BW limiting.
** Returns 0 or an errno translated from the last admin-queue status. */
5253 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5256 struct ixl_vsi *vsi;
5257 struct i40e_vsi_context vsi_ctx;
5259 uint16_t first_queue;
5260 enum i40e_status_code code;
5265 vsi_ctx.pf_num = hw->pf_id;
5266 vsi_ctx.uplink_seid = pf->veb_seid;
5267 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5268 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5269 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5271 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5273 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5274 vsi_ctx.info.switch_id = htole16(0);
5276 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5277 vsi_ctx.info.sec_flags = 0;
/* Enable MAC anti-spoof checking only when the VF is flagged for it. */
5278 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5279 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5281 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5282 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5283 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5285 vsi_ctx.info.valid_sections |=
5286 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5287 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* VF queues are carved out after the PF's queues, IXLV_MAX_QUEUES each. */
5288 first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5289 for (i = 0; i < IXLV_MAX_QUEUES; i++)
5290 vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
/* Remaining map slots are marked unused. */
5291 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5292 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5294 vsi_ctx.info.tc_mapping[0] = htole16(
5295 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5296 (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5298 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5299 if (code != I40E_SUCCESS)
5300 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5301 vf->vsi.seid = vsi_ctx.seid;
5302 vf->vsi.vsi_num = vsi_ctx.vsi_number;
5303 vf->vsi.first_queue = first_queue;
5304 vf->vsi.num_queues = IXLV_MAX_QUEUES;
5306 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5307 if (code != I40E_SUCCESS)
5308 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Disable any bandwidth limit on the new VSI. */
5310 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5311 if (code != I40E_SUCCESS) {
5312 device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5313 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5314 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5317 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/* Sets up a VF's VSI: allocates it, resets the filter counters, installs
** the broadcast filter, and reapplies any existing filters. */
5322 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5329 error = ixl_vf_alloc_vsi(pf, vf);
5333 vf->vsi.hw_filters_add = 0;
5334 vf->vsi.hw_filters_del = 0;
5335 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5336 ixl_reconfigure_filters(&vf->vsi);
/* Writes one entry of the VSI queue table. Each 32-bit QTABLE register
** packs two queue indices, hence the index/shift computation below. */
5342 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5349 * Two queues are mapped in a single register, so we have to do some
5350 * gymnastics to convert the queue number into a register index and
5354 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
/* Read-modify-write: clear this queue's field, then insert 'val'. */
5356 qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5357 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5358 qtable |= val << shift;
5359 wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/* Programs the VF's queue mappings: enables non-contiguous VSI queue
** tables, maps each VF-visible queue to its global queue, and marks the
** unused VSI queue slots. */
5363 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5372 * Contiguous mappings aren't actually supported by the hardware,
5373 * so we have to use non-contiguous mappings.
5375 wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5376 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
/* Enable TX/RX queue mapping for this VF. */
5378 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5379 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
/* Map VF-relative queue i to its global queue number. */
5381 for (i = 0; i < vf->vsi.num_queues; i++) {
5382 qtable = (vf->vsi.first_queue + i) <<
5383 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5385 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5388 /* Map queues allocated to VF to its VSI. */
5389 for (i = 0; i < vf->vsi.num_queues; i++)
5390 ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5392 /* Set rest of VSI queues as unused. */
5393 for (; i < IXL_MAX_VSI_QUEUES; i++)
5394 ixl_vf_map_vsi_queue(hw, vf, i,
5395 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/* Releases a VF's VSI by deleting the switch element from firmware. */
5401 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5410 i40e_aq_delete_element(hw, vsi->seid, NULL);
/* Disables one VF queue interrupt by writing its dynamic-control register
** (CLEARPBA only, enable bit left clear). */
5414 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5417 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/* Terminates a VF interrupt linked list by writing the end-of-list
** sentinel (all-ones queue index/type) into the given LNKLST register. */
5422 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5425 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5426 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/* Tears down a VF: releases its VSI, disables every queue interrupt, and
** unregisters every interrupt linked list, then zeroes the queue count. */
5431 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5434 uint32_t vfint_reg, vpint_reg;
5439 ixl_vf_vsi_release(pf, &vf->vsi);
5441 /* Index 0 has a special register. */
5442 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5444 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5445 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5446 ixl_vf_disable_queue_intr(hw, vfint_reg);
5449 /* Index 0 has a special register. */
5450 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5452 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5453 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5454 ixl_vf_unregister_intr(hw, vpint_reg);
5457 vf->vsi.num_queues = 0;
/* Polls the PCI config-access registers until the VF reports no pending
** PCIe transactions, up to IXL_VF_RESET_TIMEOUT iterations. */
5461 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5465 uint16_t global_vf_num;
5469 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
/* Select the VF's device-status register via the CIAA window... */
5471 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5472 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
/* ...then poll CIAD for the transactions-pending bit to clear. */
5473 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5474 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5475 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/* Triggers a software reset of the VF (VFSWR bit) and then runs the full
** reinitialization sequence. */
5484 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5491 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5492 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5493 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5496 ixl_reinit_vf(pf, vf);
/* Completes a VF reset: waits for PCIe quiescence and reset completion,
** clears the reset trigger, tears down and rebuilds the VF's VSI and
** queue mappings, then marks the VF active. */
5500 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5503 uint32_t vfrstat, vfrtrig;
5508 error = ixl_flush_pcie(pf, vf);
5510 device_printf(pf->dev,
5511 "Timed out waiting for PCIe activity to stop on VF-%d\n",
/* Poll VFRSTAT for reset-done, bounded by IXL_VF_RESET_TIMEOUT. */
5514 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5517 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5518 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5522 if (i == IXL_VF_RESET_TIMEOUT)
5523 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5525 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
/* Clear the software-reset trigger set by ixl_reset_vf(). */
5527 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5528 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5529 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
/* Only quiesce rings if a VSI was actually allocated before. */
5531 if (vf->vsi.seid != 0)
5532 ixl_disable_rings(&vf->vsi);
5534 ixl_vf_release_resources(pf, vf);
5535 ixl_vf_setup_vsi(pf, vf);
5536 ixl_vf_map_queues(pf, vf);
5538 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
/* Maps a virtchnl opcode to its printable name for debug logging. */
5543 ixl_vc_opcode_str(uint16_t op)
5547 case I40E_VIRTCHNL_OP_VERSION:
5549 case I40E_VIRTCHNL_OP_RESET_VF:
5550 return ("RESET_VF");
5551 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5552 return ("GET_VF_RESOURCES");
5553 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5554 return ("CONFIG_TX_QUEUE");
5555 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5556 return ("CONFIG_RX_QUEUE");
5557 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5558 return ("CONFIG_VSI_QUEUES");
5559 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5560 return ("CONFIG_IRQ_MAP");
5561 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5562 return ("ENABLE_QUEUES");
5563 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5564 return ("DISABLE_QUEUES");
5565 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5566 return ("ADD_ETHER_ADDRESS");
5567 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5568 return ("DEL_ETHER_ADDRESS");
5569 case I40E_VIRTCHNL_OP_ADD_VLAN:
5570 return ("ADD_VLAN");
5571 case I40E_VIRTCHNL_OP_DEL_VLAN:
5572 return ("DEL_VLAN");
5573 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5574 return ("CONFIG_PROMISCUOUS_MODE");
5575 case I40E_VIRTCHNL_OP_GET_STATS:
5576 return ("GET_STATS");
5577 case I40E_VIRTCHNL_OP_FCOE:
5579 case I40E_VIRTCHNL_OP_EVENT:
/* Returns the debug verbosity level for a virtchnl opcode; GET_STATS is
** treated specially (it is a high-frequency message). */
5587 ixl_vc_opcode_level(uint16_t opcode)
5591 case I40E_VIRTCHNL_OP_GET_STATS:
/* Sends a virtchnl message (opcode + status + optional payload) to a VF
** over the admin queue, logging it at the opcode's debug level. */
5599 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5600 enum i40e_status_code status, void *msg, uint16_t len)
5606 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5608 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5609 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5610 ixl_vc_opcode_str(op), op, status, vf->vf_num);
5612 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Convenience wrapper: sends a success (ACK) reply with no payload. */
5616 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5619 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/* Sends an error (NACK) reply and logs the originating file/line of the
** rejection (invoked through the i40e_send_vf_nack macro). */
5623 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5624 enum i40e_status_code status, const char *file, int line)
5627 I40E_VC_DEBUG(pf, 1,
5628 "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5629 ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5630 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/* Handles VIRTCHNL_OP_VERSION: validates the payload size and replies
** with the PF's supported virtchnl major/minor version. */
5634 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5637 struct i40e_virtchnl_version_info reply;
5639 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5640 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5645 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5646 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5647 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/* Handles VIRTCHNL_OP_RESET_VF: the message carries no payload; resets
** the VF. Per protocol, a reset request gets no reply. */
5652 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5656 if (msg_size != 0) {
5657 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5662 ixl_reset_vf(pf, vf);
5664 /* No response to a reset message. */
/* Handles VIRTCHNL_OP_GET_VF_RESOURCES: replies with the VF's VSI id,
** queue count, vector count, offload flags and assigned MAC address. */
5668 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5671 struct i40e_virtchnl_vf_resource reply;
5673 if (msg_size != 0) {
5674 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5679 bzero(&reply, sizeof(reply));
5681 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5684 reply.num_queue_pairs = vf->vsi.num_queues;
5685 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5686 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5687 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5688 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5689 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5691 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5692 I40E_SUCCESS, &reply, sizeof(reply));
/* Programs one VF TX queue's HMC context from the virtchnl txq_info and
** binds the global queue to the VF via QTX_CTL. Returns 0 on success. */
5696 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5697 struct i40e_virtchnl_txq_info *info)
5700 struct i40e_hmc_obj_txq txq;
5701 uint16_t global_queue_num, global_vf_num;
5702 enum i40e_status_code status;
/* Translate the VF-relative queue id to the global queue number. */
5706 global_queue_num = vf->vsi.first_queue + info->queue_id;
5707 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5708 bzero(&txq, sizeof(txq));
5710 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5711 if (status != I40E_SUCCESS)
/* DMA base is stored in context units, not bytes. */
5714 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5716 txq.head_wb_ena = info->headwb_enabled;
5717 txq.head_wb_addr = info->dma_headwb_addr;
5718 txq.qlen = info->ring_len;
5719 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5720 txq.rdylist_act = 0;
5722 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5723 if (status != I40E_SUCCESS)
/* Associate the queue with this VF in the TX queue control register. */
5726 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5727 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5728 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5729 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
/* Programs one VF RX queue's HMC context from the virtchnl rxq_info,
** validating buffer and packet size limits first. Returns 0 on success. */
5736 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5737 struct i40e_virtchnl_rxq_info *info)
5740 struct i40e_hmc_obj_rxq rxq;
5741 uint16_t global_queue_num;
5742 enum i40e_status_code status;
5745 global_queue_num = vf->vsi.first_queue + info->queue_id;
5746 bzero(&rxq, sizeof(rxq));
/* Bound-check VF-supplied sizes before touching hardware state. */
5748 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5751 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5752 info->max_pkt_size < ETHER_MIN_LEN)
5755 if (info->splithdr_enabled) {
5756 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Only allow the defined header-split position bits. */
5759 rxq.hsplit_0 = info->rx_split_pos &
5760 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5761 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5762 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5763 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5764 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5769 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5770 if (status != I40E_SUCCESS)
/* DMA base is stored in context units, not bytes. */
5773 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5774 rxq.qlen = info->ring_len;
5776 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5782 rxq.rxmax = info->max_pkt_size;
/* Enable TPH hints for descriptors, writeback, data and header. */
5783 rxq.tphrdesc_ena = 1;
5784 rxq.tphwdesc_ena = 1;
5785 rxq.tphdata_ena = 1;
5786 rxq.tphhead_ena = 1;
5790 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5791 if (status != I40E_SUCCESS)
/* Handles VIRTCHNL_OP_CONFIG_VSI_QUEUES: validates message size, VSI id,
** and every queue-pair entry, then configures each TX/RX queue pair.
** Any validation or configuration failure NACKs and aborts. */
5798 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5801 struct i40e_virtchnl_vsi_queue_config_info *info;
5802 struct i40e_virtchnl_queue_pair_info *pair;
5805 if (msg_size < sizeof(*info)) {
5806 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5812 if (info->num_queue_pairs == 0) {
5813 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* The payload must be exactly header + N pair entries. */
5818 if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5819 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5824 if (info->vsi_id != vf->vsi.vsi_num) {
5825 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5830 for (i = 0; i < info->num_queue_pairs; i++) {
5831 pair = &info->qpair[i];
/* TX and RX of a pair must target the same, in-range queue on this VSI. */
5833 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5834 pair->rxq.vsi_id != vf->vsi.vsi_num ||
5835 pair->txq.queue_id != pair->rxq.queue_id ||
5836 pair->txq.queue_id >= vf->vsi.num_queues) {
5838 i40e_send_vf_nack(pf, vf,
5839 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5843 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5844 i40e_send_vf_nack(pf, vf,
5845 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5849 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5850 i40e_send_vf_nack(pf, vf,
5851 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5856 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/* Writes one queue-interrupt control register (RQCTL or TQCTL) linking
** the current queue into the interrupt chain after *last_queue, then
** advances the last_type/last_queue cursor to this queue. */
5860 ixl_vf_set_qctl(struct ixl_pf *pf,
5861 const struct i40e_virtchnl_vector_map *vector,
5862 enum i40e_queue_type cur_type, uint16_t cur_queue,
5863 enum i40e_queue_type *last_type, uint16_t *last_queue)
5865 uint32_t offset, qctl;
/* RX and TX use distinct register files and ITR indices. */
5868 if (cur_type == I40E_QUEUE_TYPE_RX) {
5869 offset = I40E_QINT_RQCTL(cur_queue);
5870 itr_indx = vector->rxitr_idx;
5872 offset = I40E_QINT_TQCTL(cur_queue);
5873 itr_indx = vector->txitr_idx;
5876 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5877 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5878 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5879 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5880 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5882 wr32(&pf->hw, offset, qctl);
5884 *last_type = cur_type;
5885 *last_queue = cur_queue;
/* Builds the interrupt linked list for one vector from its rxq/txq maps
** (interleaving TX and RX per datasheet guidance), then points the
** vector's LNKLST register at the head of the chain. */
5889 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5890 const struct i40e_virtchnl_vector_map *vector)
5894 enum i40e_queue_type type, last_type;
5895 uint32_t lnklst_reg;
5896 uint16_t rxq_map, txq_map, cur_queue, last_queue;
5900 rxq_map = vector->rxq_map;
5901 txq_map = vector->txq_map;
/* Start from the end-of-list sentinel; the chain is built backwards. */
5903 last_queue = IXL_END_OF_INTR_LNKLST;
5904 last_type = I40E_QUEUE_TYPE_RX;
5907 * The datasheet says to optimize performance, RX queues and TX queues
5908 * should be interleaved in the interrupt linked list, so we process
5909 * both at once here.
5911 while ((rxq_map != 0) || (txq_map != 0)) {
5913 qindex = ffs(txq_map) - 1;
5914 type = I40E_QUEUE_TYPE_TX;
5915 cur_queue = vf->vsi.first_queue + qindex;
5916 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5917 &last_type, &last_queue);
5918 txq_map &= ~(1 << qindex);
5922 qindex = ffs(rxq_map) - 1;
5923 type = I40E_QUEUE_TYPE_RX;
5924 cur_queue = vf->vsi.first_queue + qindex;
5925 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5926 &last_type, &last_queue);
5927 rxq_map &= ~(1 << qindex);
/* Vector 0 uses a dedicated register; others are indexed. */
5931 if (vector->vector_id == 0)
5932 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5934 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5936 wr32(hw, lnklst_reg,
5937 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5938 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/* Handles VIRTCHNL_OP_CONFIG_IRQ_MAP: validates message size, vector ids,
** queue bitmaps and ITR indices, then programs each vector's interrupt
** linked list via ixl_vf_config_vector(). */
5944 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5947 struct i40e_virtchnl_irq_map_info *map;
5948 struct i40e_virtchnl_vector_map *vector;
5950 int i, largest_txq, largest_rxq;
5954 if (msg_size < sizeof(*map)) {
5955 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5961 if (map->num_vectors == 0) {
5962 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
/* The payload must be exactly header + N vector-map entries. */
5967 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5968 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5973 for (i = 0; i < map->num_vectors; i++) {
5974 vector = &map->vecmap[i];
5976 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5977 vector->vsi_id != vf->vsi.vsi_num) {
5978 i40e_send_vf_nack(pf, vf,
5979 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* Highest set bit in each bitmap must be an in-range queue. */
5983 if (vector->rxq_map != 0) {
5984 largest_rxq = fls(vector->rxq_map) - 1;
5985 if (largest_rxq >= vf->vsi.num_queues) {
5986 i40e_send_vf_nack(pf, vf,
5987 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5993 if (vector->txq_map != 0) {
5994 largest_txq = fls(vector->txq_map) - 1;
5995 if (largest_txq >= vf->vsi.num_queues) {
5996 i40e_send_vf_nack(pf, vf,
5997 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6003 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6004 vector->txitr_idx > IXL_MAX_ITR_IDX) {
6005 i40e_send_vf_nack(pf, vf,
6006 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6011 ixl_vf_config_vector(pf, vf, vector);
6014 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
/* Handles VIRTCHNL_OP_ENABLE_QUEUES: validates the queue_select payload
** and enables the VF's rings; ACKs on success, NACKs otherwise. */
6018 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6021 struct i40e_virtchnl_queue_select *select;
6024 if (msg_size != sizeof(*select)) {
6025 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6031 if (select->vsi_id != vf->vsi.vsi_num ||
6032 select->rx_queues == 0 || select->tx_queues == 0) {
6033 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
/* NOTE(review): the individual queue bitmaps are not honored -- all of
** the VSI's rings are enabled together. */
6038 error = ixl_enable_rings(&vf->vsi);
6040 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6045 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
/* Handles VIRTCHNL_OP_DISABLE_QUEUES: validates the queue_select payload
** and disables the VF's rings; ACKs on success, NACKs otherwise. */
6049 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6050 void *msg, uint16_t msg_size)
6052 struct i40e_virtchnl_queue_select *select;
6055 if (msg_size != sizeof(*select)) {
6056 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6062 if (select->vsi_id != vf->vsi.vsi_num ||
6063 select->rx_queues == 0 || select->tx_queues == 0) {
6064 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6069 error = ixl_disable_rings(&vf->vsi);
6071 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6076 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
/* Returns true when the address is all-zero (00:00:00:00:00:00). */
6080 ixl_zero_mac(const uint8_t *addr)
6082 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6084 return (cmp_etheraddr(addr, zero));
/* Returns true when the address is the Ethernet broadcast address. */
6088 ixl_bcast_mac(const uint8_t *addr)
6091 return (cmp_etheraddr(addr, ixl_bcast_addr));
/* Validates a MAC address a VF wants to filter on: rejects zero and
** broadcast, and -- unless the VF may set its own MAC -- rejects any
** unicast address other than the VF's assigned one. */
6095 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6098 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6102 * If the VF is not allowed to change its MAC address, don't let it
6103 * set a MAC filter for an address that is not a multicast address and
6104 * is not its assigned MAC.
6106 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6107 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/* Handles VIRTCHNL_OP_ADD_ETHER_ADDRESS: validates the address list
** (size, VSI id, each address) before adding any filter, then installs
** all filters and ACKs. */
6114 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6117 struct i40e_virtchnl_ether_addr_list *addr_list;
6118 struct i40e_virtchnl_ether_addr *addr;
6119 struct ixl_vsi *vsi;
6121 size_t expected_size;
6125 if (msg_size < sizeof(*addr_list)) {
6126 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
/* The payload must be exactly header + N address entries. */
6132 expected_size = sizeof(*addr_list) +
6133 addr_list->num_elements * sizeof(*addr);
6135 if (addr_list->num_elements == 0 ||
6136 addr_list->vsi_id != vsi->vsi_num ||
6137 msg_size != expected_size) {
6138 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
/* Validate every address first so the operation is all-or-nothing. */
6143 for (i = 0; i < addr_list->num_elements; i++) {
6144 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6145 i40e_send_vf_nack(pf, vf,
6146 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6151 for (i = 0; i < addr_list->num_elements; i++) {
6152 addr = &addr_list->list[i];
6153 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6156 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6160 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6163 struct i40e_virtchnl_ether_addr_list *addr_list;
6164 struct i40e_virtchnl_ether_addr *addr;
6165 size_t expected_size;
6168 if (msg_size < sizeof(*addr_list)) {
6169 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6175 expected_size = sizeof(*addr_list) +
6176 addr_list->num_elements * sizeof(*addr);
6178 if (addr_list->num_elements == 0 ||
6179 addr_list->vsi_id != vf->vsi.vsi_num ||
6180 msg_size != expected_size) {
6181 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6186 for (i = 0; i < addr_list->num_elements; i++) {
6187 addr = &addr_list->list[i];
6188 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6189 i40e_send_vf_nack(pf, vf,
6190 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6195 for (i = 0; i < addr_list->num_elements; i++) {
6196 addr = &addr_list->list[i];
6197 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6200 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6203 static enum i40e_status_code
/* Updates the VF's VSI to strip VLAN tags on receive (PVLAN mode ALL,
** emod STR_BOTH) via an admin-queue VSI-params update. */
6204 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6206 struct i40e_vsi_context vsi_ctx;
6208 vsi_ctx.seid = vf->vsi.seid;
6210 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6211 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6212 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6213 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6214 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/* Handles VIRTCHNL_OP_ADD_VLAN: validates size, VSI id, VLAN capability
** and each VLAN id, enables VLAN stripping, then adds a filter for the
** VF's MAC on every requested VLAN and ACKs. */
6218 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6221 struct i40e_virtchnl_vlan_filter_list *filter_list;
6222 enum i40e_status_code code;
6223 size_t expected_size;
6226 if (msg_size < sizeof(*filter_list)) {
6227 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* The payload must be exactly header + N uint16 VLAN ids. */
6233 expected_size = sizeof(*filter_list) +
6234 filter_list->num_elements * sizeof(uint16_t);
6235 if (filter_list->num_elements == 0 ||
6236 filter_list->vsi_id != vf->vsi.vsi_num ||
6237 msg_size != expected_size) {
6238 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6243 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6244 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* Validate every VLAN id before changing any state. */
6249 for (i = 0; i < filter_list->num_elements; i++) {
6250 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6251 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6257 code = ixl_vf_enable_vlan_strip(pf, vf);
6258 if (code != I40E_SUCCESS) {
6259 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6263 for (i = 0; i < filter_list->num_elements; i++)
6264 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6266 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6270 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6273 struct i40e_virtchnl_vlan_filter_list *filter_list;
6275 size_t expected_size;
6277 if (msg_size < sizeof(*filter_list)) {
6278 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6284 expected_size = sizeof(*filter_list) +
6285 filter_list->num_elements * sizeof(uint16_t);
6286 if (filter_list->num_elements == 0 ||
6287 filter_list->vsi_id != vf->vsi.vsi_num ||
6288 msg_size != expected_size) {
6289 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6294 for (i = 0; i < filter_list->num_elements; i++) {
6295 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6296 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6302 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6303 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6308 for (i = 0; i < filter_list->num_elements; i++)
6309 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6311 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6315 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6316 void *msg, uint16_t msg_size)
6318 struct i40e_virtchnl_promisc_info *info;
6319 enum i40e_status_code code;
6321 if (msg_size != sizeof(*info)) {
6322 i40e_send_vf_nack(pf, vf,
6323 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6327 if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6328 i40e_send_vf_nack(pf, vf,
6329 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6334 if (info->vsi_id != vf->vsi.vsi_num) {
6335 i40e_send_vf_nack(pf, vf,
6336 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6340 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6341 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6342 if (code != I40E_SUCCESS) {
6343 i40e_send_vf_nack(pf, vf,
6344 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6348 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6349 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6350 if (code != I40E_SUCCESS) {
6351 i40e_send_vf_nack(pf, vf,
6352 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6356 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6360 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6363 struct i40e_virtchnl_queue_select *queue;
6365 if (msg_size != sizeof(*queue)) {
6366 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6372 if (queue->vsi_id != vf->vsi.vsi_num) {
6373 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6378 ixl_update_eth_stats(&vf->vsi);
6380 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6381 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6385 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6389 uint16_t vf_num, msg_size;
6392 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6393 opcode = le32toh(event->desc.cookie_high);
6395 if (vf_num >= pf->num_vfs) {
6396 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6400 vf = &pf->vfs[vf_num];
6401 msg = event->msg_buf;
6402 msg_size = event->msg_len;
6404 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6405 "Got msg %s(%d) from VF-%d of size %d\n",
6406 ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6409 case I40E_VIRTCHNL_OP_VERSION:
6410 ixl_vf_version_msg(pf, vf, msg, msg_size);
6412 case I40E_VIRTCHNL_OP_RESET_VF:
6413 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6415 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6416 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6418 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6419 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6421 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6422 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6424 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6425 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6427 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6428 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6430 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6431 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6433 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6434 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6436 case I40E_VIRTCHNL_OP_ADD_VLAN:
6437 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6439 case I40E_VIRTCHNL_OP_DEL_VLAN:
6440 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6442 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6443 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6445 case I40E_VIRTCHNL_OP_GET_STATS:
6446 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6449 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6450 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6451 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6453 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6458 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
6460 ixl_handle_vflr(void *arg, int pending)
6464 uint16_t global_vf_num;
6465 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6472 for (i = 0; i < pf->num_vfs; i++) {
6473 global_vf_num = hw->func_caps.vf_base_id + i;
6475 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6476 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6477 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6478 if (vflrstat & vflrstat_mask) {
6479 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6482 ixl_reinit_vf(pf, &pf->vfs[i]);
6486 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6487 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6488 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6495 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6499 case I40E_AQ_RC_EPERM:
6501 case I40E_AQ_RC_ENOENT:
6503 case I40E_AQ_RC_ESRCH:
6505 case I40E_AQ_RC_EINTR:
6507 case I40E_AQ_RC_EIO:
6509 case I40E_AQ_RC_ENXIO:
6511 case I40E_AQ_RC_E2BIG:
6513 case I40E_AQ_RC_EAGAIN:
6515 case I40E_AQ_RC_ENOMEM:
6517 case I40E_AQ_RC_EACCES:
6519 case I40E_AQ_RC_EFAULT:
6521 case I40E_AQ_RC_EBUSY:
6523 case I40E_AQ_RC_EEXIST:
6525 case I40E_AQ_RC_EINVAL:
6527 case I40E_AQ_RC_ENOTTY:
6529 case I40E_AQ_RC_ENOSPC:
6531 case I40E_AQ_RC_ENOSYS:
6533 case I40E_AQ_RC_ERANGE:
6535 case I40E_AQ_RC_EFLUSHED:
6536 return (EINVAL); /* No exact equivalent in errno.h */
6537 case I40E_AQ_RC_BAD_ADDR:
6539 case I40E_AQ_RC_EMODE:
6541 case I40E_AQ_RC_EFBIG:
6549 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6553 struct ixl_vsi *pf_vsi;
6554 enum i40e_status_code ret;
6557 pf = device_get_softc(dev);
6562 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6565 if (pf->vfs == NULL) {
6570 for (i = 0; i < num_vfs; i++)
6571 sysctl_ctx_init(&pf->vfs[i].ctx);
6573 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6574 1, FALSE, FALSE, &pf->veb_seid, NULL);
6575 if (ret != I40E_SUCCESS) {
6576 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6577 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6582 ixl_configure_msix(pf);
6583 ixl_enable_adminq(hw);
6585 pf->num_vfs = num_vfs;
6590 free(pf->vfs, M_IXL);
6597 ixl_uninit_iov(device_t dev)
6601 struct ixl_vsi *vsi;
6606 pf = device_get_softc(dev);
6612 for (i = 0; i < pf->num_vfs; i++) {
6613 if (pf->vfs[i].vsi.seid != 0)
6614 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6617 if (pf->veb_seid != 0) {
6618 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6622 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6623 ixl_disable_intr(vsi);
6626 num_vfs = pf->num_vfs;
6632 /* Do this after the unlock as sysctl_ctx_free might sleep. */
6633 for (i = 0; i < num_vfs; i++)
6634 sysctl_ctx_free(&vfs[i].ctx);
6639 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6641 char sysctl_name[QUEUE_NAME_LEN];
6648 pf = device_get_softc(dev);
6649 vf = &pf->vfs[vfnum];
6655 vf->vf_flags = VF_FLAG_ENABLED;
6656 SLIST_INIT(&vf->vsi.ftl);
6658 error = ixl_vf_setup_vsi(pf, vf);
6662 if (nvlist_exists_binary(params, "mac-addr")) {
6663 mac = nvlist_get_binary(params, "mac-addr", &size);
6664 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6666 if (nvlist_get_bool(params, "allow-set-mac"))
6667 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6670 * If the administrator has not specified a MAC address then
6671 * we must allow the VF to choose one.
6673 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6675 if (nvlist_get_bool(params, "mac-anti-spoof"))
6676 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6678 if (nvlist_get_bool(params, "allow-promisc"))
6679 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6681 vf->vf_flags |= VF_FLAG_VLAN_CAP;
6683 ixl_reset_vf(pf, vf);
6687 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6688 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6693 #endif /* PCI_IOV */