1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
40 #include "ixl_iw_int.h"
44 #include "ixl_pf_iov.h"
47 /*********************************************************************
49 *********************************************************************/
/*
 * Driver version, stitched together as "<major>.<minor>.<build>-k" and
 * reported to iflib via isc_driver_version below.
 */
50 #define IXL_DRIVER_VERSION_MAJOR 2
51 #define IXL_DRIVER_VERSION_MINOR 3
52 #define IXL_DRIVER_VERSION_BUILD 1
54 #define IXL_DRIVER_VERSION_STRING \
55 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \
56 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \
57 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
59 /*********************************************************************
62 * Used by probe to select devices to load on
64 * ( Vendor ID, Device ID, Branding String )
65 *********************************************************************/
/*
 * (Vendor ID, Device ID, branding string) table consumed by the iflib
 * probe path (see IFLIB_PNP_INFO / isc_vendor_info below).
 * NOTE(review): this excerpt is missing lines (the opening brace and the
 * PVID_END terminator are not visible) -- verify against the full source.
 */
67 static pci_vendor_info_t ixl_vendor_info_array[] =
69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
86 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
87 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
88 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
89 /* required last entry */
93 /*********************************************************************
95 *********************************************************************/
96 /*** IFLIB interface ***/
97 static void *ixl_register(device_t dev);
98 static int ixl_if_attach_pre(if_ctx_t ctx);
99 static int ixl_if_attach_post(if_ctx_t ctx);
100 static int ixl_if_detach(if_ctx_t ctx);
101 static int ixl_if_shutdown(if_ctx_t ctx);
102 static int ixl_if_suspend(if_ctx_t ctx);
103 static int ixl_if_resume(if_ctx_t ctx);
104 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
105 static void ixl_if_enable_intr(if_ctx_t ctx);
106 static void ixl_if_disable_intr(if_ctx_t ctx);
107 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
108 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
109 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
110 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
111 static void ixl_if_queues_free(if_ctx_t ctx);
112 static void ixl_if_update_admin_status(if_ctx_t ctx);
113 static void ixl_if_multi_set(if_ctx_t ctx);
114 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
115 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
116 static int ixl_if_media_change(if_ctx_t ctx);
117 static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
118 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
119 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
120 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
121 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
122 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
123 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
124 static bool ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
126 static void ixl_if_vflr_handle(if_ctx_t ctx);
130 static void ixl_save_pf_tunables(struct ixl_pf *);
131 static int ixl_allocate_pci_resources(struct ixl_pf *);
132 static void ixl_setup_ssctx(struct ixl_pf *pf);
133 static void ixl_admin_timer(void *arg);
135 /*********************************************************************
136 * FreeBSD Device Interface Entry Points
137 *********************************************************************/
/*
 * Newbus device method table: all device_* and pci_iov_* entry points are
 * delegated to the generic iflib handlers; only device_register stays local.
 * NOTE(review): the DEVMETHOD_END terminator / closing brace are not visible
 * in this excerpt.
 */
139 static device_method_t ixl_methods[] = {
140 /* Device interface */
141 DEVMETHOD(device_register, ixl_register),
142 DEVMETHOD(device_probe, iflib_device_probe),
143 DEVMETHOD(device_attach, iflib_device_attach),
144 DEVMETHOD(device_detach, iflib_device_detach),
145 DEVMETHOD(device_shutdown, iflib_device_shutdown),
147 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
148 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
149 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/*
 * Driver/module registration glue: driver_t, devclass, PNP info for
 * devmatch(8), module version and dependencies on pci/ether/iflib.
 */
154 static driver_t ixl_driver = {
155 "ixl", ixl_methods, sizeof(struct ixl_pf),
158 devclass_t ixl_devclass;
159 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
160 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
161 MODULE_VERSION(ixl, 3);
163 MODULE_DEPEND(ixl, pci, 1, 1, 1);
164 MODULE_DEPEND(ixl, ether, 1, 1, 1);
165 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
/*
 * iflib device-interface (ifdi_*) method table mapping iflib callbacks to
 * the ixl_if_* implementations in this file (SR-IOV handlers live in
 * ixl_pf_iov.c).  NOTE(review): DEVMETHOD_END is not visible in this excerpt.
 */
167 static device_method_t ixl_if_methods[] = {
168 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
169 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
170 DEVMETHOD(ifdi_detach, ixl_if_detach),
171 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
172 DEVMETHOD(ifdi_suspend, ixl_if_suspend),
173 DEVMETHOD(ifdi_resume, ixl_if_resume),
174 DEVMETHOD(ifdi_init, ixl_if_init),
175 DEVMETHOD(ifdi_stop, ixl_if_stop),
176 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
177 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
178 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
179 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
180 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
181 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
182 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
183 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
184 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
185 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
186 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
187 DEVMETHOD(ifdi_media_status, ixl_if_media_status),
188 DEVMETHOD(ifdi_media_change, ixl_if_media_change),
189 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
190 DEVMETHOD(ifdi_timer, ixl_if_timer),
191 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
192 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
193 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
194 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
195 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
196 DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
198 DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
199 DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
200 DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
201 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
/* driver_t handed to iflib via isc_driver; softc is the per-PF ixl_pf. */
208 static driver_t ixl_if_driver = {
209 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
213 ** TUNEABLE PARAMETERS:
/*
 * Loader tunables / read-only sysctls under hw.ixl.*.  Each tunable is a
 * file-scope int mirrored by a CTLFLAG_RDTUN sysctl; the values are copied
 * into the softc by ixl_save_pf_tunables() during attach.
 */
216 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
217 "ixl driver parameters");
221 * Leave this on unless you need to send flow control
222 * frames (or other control frames) from software
224 static int ixl_enable_tx_fc_filter = 1;
225 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
226 &ixl_enable_tx_fc_filter);
227 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
228 &ixl_enable_tx_fc_filter, 0,
229 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
/* Debug aid: force the driver down the firmware-recovery-mode attach path. */
233 static int ixl_debug_recovery_mode = 0;
234 TUNABLE_INT("hw.ixl.debug_recovery_mode",
235 &ixl_debug_recovery_mode);
236 SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
237 &ixl_debug_recovery_mode, 0,
238 "Act like when FW entered recovery mode (for debuging)");
241 static int ixl_i2c_access_method = 0;
242 TUNABLE_INT("hw.ixl.i2c_access_method",
243 &ixl_i2c_access_method);
244 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
245 &ixl_i2c_access_method, 0,
246 IXL_SYSCTL_HELP_I2C_METHOD);
248 static int ixl_enable_vf_loopback = 1;
249 TUNABLE_INT("hw.ixl.enable_vf_loopback",
250 &ixl_enable_vf_loopback);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
252 &ixl_enable_vf_loopback, 0,
253 IXL_SYSCTL_HELP_VF_LOOPBACK);
256 * Different method for processing TX descriptor
/* Selects head-writeback vs. descriptor-writeback TX path (see ixl_setup_ssctx). */
259 static int ixl_enable_head_writeback = 1;
260 TUNABLE_INT("hw.ixl.enable_head_writeback",
261 &ixl_enable_head_writeback);
262 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
263 &ixl_enable_head_writeback, 0,
264 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
266 static int ixl_core_debug_mask = 0;
267 TUNABLE_INT("hw.ixl.core_debug_mask",
268 &ixl_core_debug_mask);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
270 &ixl_core_debug_mask, 0,
271 "Display debug statements that are printed in non-shared code");
273 static int ixl_shared_debug_mask = 0;
274 TUNABLE_INT("hw.ixl.shared_debug_mask",
275 &ixl_shared_debug_mask);
276 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
277 &ixl_shared_debug_mask, 0,
278 "Display debug statements that are printed in shared code");
282 ** Controls for Interrupt Throttling
283 ** - true/false for dynamic adjustment
284 ** - default values for static ITR
286 static int ixl_dynamic_rx_itr = 0;
287 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
288 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
289 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
291 static int ixl_dynamic_tx_itr = 0;
292 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
293 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
294 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
297 static int ixl_rx_itr = IXL_ITR_8K;
298 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
299 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
300 &ixl_rx_itr, 0, "RX Interrupt Rate");
302 static int ixl_tx_itr = IXL_ITR_4K;
303 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
304 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
305 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* -1 appears to mean "leave flow control as configured" -- TODO confirm. */
307 static int ixl_flow_control = -1;
308 SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
309 &ixl_flow_control, 0, "Initial Flow Control setting");
/* Non-static: also referenced by the iWARP interface code (ixl_iw). */
312 int ixl_enable_iwarp = 0;
313 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
314 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
315 &ixl_enable_iwarp, 0, "iWARP enabled");
/*
 * NOTE(review): the #else/#endif of this version conditional are not
 * visible in this excerpt (lines dropped by extraction).
 */
317 #if __FreeBSD_version < 1100000
318 int ixl_limit_iwarp_msix = 1;
320 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
322 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
323 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
324 &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
327 extern struct if_txrx ixl_txrx_hwb;
328 extern struct if_txrx ixl_txrx_dwb;
/*
 * Static iflib shared context: DMA sizing/alignment limits, descriptor
 * ring bounds, vendor table and driver hooks.  Returned to iflib by
 * ixl_register().  NOTE(review): some initializers and the closing brace
 * are not visible in this excerpt.
 */
330 static struct if_shared_ctx ixl_sctx_init = {
331 .isc_magic = IFLIB_MAGIC,
332 .isc_q_align = PAGE_SIZE,
333 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
334 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
335 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
336 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
337 .isc_rx_maxsize = 16384,
338 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
339 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
344 .isc_admin_intrcnt = 1,
345 .isc_vendor_info = ixl_vendor_info_array,
346 .isc_driver_version = IXL_DRIVER_VERSION_STRING,
347 .isc_driver = &ixl_if_driver,
348 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
350 .isc_nrxd_min = {IXL_MIN_RING},
351 .isc_ntxd_min = {IXL_MIN_RING},
352 .isc_nrxd_max = {IXL_MAX_RING},
353 .isc_ntxd_max = {IXL_MAX_RING},
354 .isc_nrxd_default = {IXL_DEFAULT_RING},
355 .isc_ntxd_default = {IXL_DEFAULT_RING},
/*
 * device_register method: hands the static shared context to iflib.
 * NOTE(review): return-type line and braces are not visible in this excerpt.
 */
360 ixl_register(device_t dev)
362 return (&ixl_sctx_init);
/*
 * Map BAR0 and cache the PCI config identifiers into the shared-code
 * struct i40e_hw, then record the bus-space tag/handle in pf->osdep so
 * the register wrappers can reach the mapped BAR.
 * NOTE(review): lines are missing from this excerpt (e.g. the error
 * return after the failed allocation) -- verify against the full source.
 */
366 ixl_allocate_pci_resources(struct ixl_pf *pf)
368 device_t dev = iflib_get_dev(pf->vsi.ctx);
369 struct i40e_hw *hw = &pf->hw;
374 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
377 if (!(pf->pci_mem)) {
378 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
382 /* Save off the PCI information */
383 hw->vendor_id = pci_get_vendor(dev);
384 hw->device_id = pci_get_device(dev);
385 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
386 hw->subsystem_vendor_id =
387 pci_read_config(dev, PCIR_SUBVEND_0, 2);
388 hw->subsystem_device_id =
389 pci_read_config(dev, PCIR_SUBDEV_0, 2);
391 hw->bus.device = pci_get_slot(dev);
392 hw->bus.func = pci_get_function(dev);
394 /* Save off register access information */
395 pf->osdep.mem_bus_space_tag =
396 rman_get_bustag(pf->pci_mem);
397 pf->osdep.mem_bus_space_handle =
398 rman_get_bushandle(pf->pci_mem);
399 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
400 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* hw_addr stores a pointer to the bus-space handle, not a raw VA. */
403 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
404 pf->hw.back = &pf->osdep;
/*
 * Fill out the iflib softc context: queue-set limits (1 in recovery mode,
 * 128 for X722, otherwise 64), TX/RX ring byte sizes, the txrx method
 * table (head-writeback vs. descriptor-writeback), MSI-X BAR, and
 * TSO/checksum capability limits.
 * NOTE(review): some lines (e.g. else branches' braces) are not visible
 * in this excerpt.
 */
410 ixl_setup_ssctx(struct ixl_pf *pf)
412 if_softc_ctx_t scctx = pf->vsi.shared;
413 struct i40e_hw *hw = &pf->hw;
415 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
416 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
417 scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
418 } else if (hw->mac.type == I40E_MAC_X722)
419 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
421 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
423 if (pf->vsi.enable_head_writeback) {
/* Head-writeback ring needs extra room for the HW-written head value. */
424 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
425 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
426 scctx->isc_txrx = &ixl_txrx_hwb;
428 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
429 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
430 scctx->isc_txrx = &ixl_txrx_dwb;
433 scctx->isc_txrx->ift_legacy_intr = ixl_intr;
434 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
435 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
436 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
437 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
438 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
439 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
440 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
441 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
442 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
443 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
/*
 * callout handler: kicks the iflib admin task and re-arms itself every
 * hz/2 ticks (runs under pf->admin_mtx via callout_init_mtx).
 */
447 ixl_admin_timer(void *arg)
449 struct ixl_pf *pf = (struct ixl_pf *)arg;
451 /* Fire off the admin task */
452 iflib_admin_intr_deferred(pf->vsi.ctx);
454 /* Reschedule the admin timer */
455 callout_schedule(&pf->admin_timer, hz/2);
/*
 * Reduced attach_pre path taken when the firmware is in recovery mode:
 * warns the user, fetches the MAC address, and (under MSI-X) keeps only
 * the admin-queue interrupt (vector 0) alive.
 * NOTE(review): the tail of this function is not visible in this excerpt.
 */
459 ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
461 struct ixl_vsi *vsi = &pf->vsi;
462 struct i40e_hw *hw = &pf->hw;
463 device_t dev = pf->dev;
465 device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
467 i40e_get_mac_addr(hw, hw->mac.addr);
469 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
470 ixl_configure_intr0_msix(pf);
471 ixl_enable_intr0(hw);
/*
 * ifdi_attach_pre: first-stage attach.  Maps PCI resources, resets the
 * PF, brings up the shared code and admin queue, validates NVM/firmware
 * API versions, reads MAC addresses, programs filter control, queries
 * FW LLDP state, and fills out the iflib softc context.  The tail lines
 * (L421-L427) are the unwind/error path: HMC, adminq, PCI resources and
 * the admin mutex/callout are torn down in reverse order.
 * NOTE(review): many lines (labels, returns, braces) were dropped from
 * this excerpt -- the visible statements are not the complete function.
 */
480 ixl_if_attach_pre(if_ctx_t ctx)
486 enum i40e_get_fw_lldp_status_resp lldp_status;
487 struct i40e_filter_control_settings filter;
488 enum i40e_status_code status;
491 dev = iflib_get_dev(ctx);
492 pf = iflib_get_softc(ctx);
494 INIT_DBG_DEV(dev, "begin");
506 vsi->media = iflib_get_media(ctx);
507 vsi->shared = iflib_get_softc_ctx(ctx);
/* Admin mutex/callout exist for the whole driver lifetime. */
509 snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
510 "%s:admin", device_get_nameunit(dev));
511 mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
512 callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
514 /* Save tunable values */
515 ixl_save_pf_tunables(pf);
517 /* Do PCI setup - map BAR0, etc */
518 if (ixl_allocate_pci_resources(pf)) {
519 device_printf(dev, "Allocation of PCI resources failed\n");
524 /* Establish a clean starting point */
526 i40e_set_mac_type(hw);
528 error = ixl_pf_reset(pf);
532 /* Initialize the shared code */
533 status = i40e_init_shared_code(hw);
535 device_printf(dev, "Unable to initialize shared code, error %s\n",
536 i40e_stat_str(hw, status));
541 /* Set up the admin queue */
542 hw->aq.num_arq_entries = IXL_AQ_LEN;
543 hw->aq.num_asq_entries = IXL_AQ_LEN;
544 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
545 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
547 status = i40e_init_adminq(hw);
548 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
549 device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
550 i40e_stat_str(hw, status));
554 ixl_print_nvm_version(pf);
/* NVM newer than the driver's FW API: refuse to continue. */
556 if (status == I40E_ERR_FIRMWARE_API_VERSION) {
557 device_printf(dev, "The driver for the device stopped "
558 "because the NVM image is newer than expected.\n");
559 device_printf(dev, "You must install the most recent version of "
560 "the network driver.\n");
/* Minor-version skew in either direction only warns. */
565 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
566 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
567 device_printf(dev, "The driver for the device detected "
568 "a newer version of the NVM image than expected.\n");
569 device_printf(dev, "Please install the most recent version "
570 "of the network driver.\n");
571 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
572 device_printf(dev, "The driver for the device detected "
573 "an older version of the NVM image than expected.\n");
574 device_printf(dev, "Please update the NVM image.\n");
/* Firmware recovery mode: divert to the reduced attach path. */
577 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
578 error = ixl_attach_pre_recovery_mode(pf);
585 i40e_clear_pxe_mode(hw);
587 /* Get capabilities from the device */
588 error = ixl_get_hw_capabilities(pf);
590 device_printf(dev, "get_hw_capabilities failed: %d\n",
595 /* Set up host memory cache */
596 error = ixl_setup_hmc(pf);
600 /* Disable LLDP from the firmware for certain NVM versions */
601 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
602 (pf->hw.aq.fw_maj_ver < 4)) {
603 i40e_aq_stop_lldp(hw, true, false, NULL);
604 pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
607 /* Try enabling Energy Efficient Ethernet (EEE) mode */
608 if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
609 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
611 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
613 /* Get MAC addresses from hardware */
614 i40e_get_mac_addr(hw, hw->mac.addr);
615 error = i40e_validate_mac_addr(hw->mac.addr);
617 device_printf(dev, "validate_mac_addr failed: %d\n", error);
620 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
621 iflib_set_mac(ctx, hw->mac.addr);
622 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
624 /* Set up the device filtering */
625 bzero(&filter, sizeof(filter));
626 filter.enable_ethtype = TRUE;
627 filter.enable_macvlan = TRUE;
628 filter.enable_fdir = FALSE;
629 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
630 if (i40e_set_filter_control(hw, &filter))
631 device_printf(dev, "i40e_set_filter_control() failed\n");
633 /* Query device FW LLDP status */
634 if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
635 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
636 atomic_set_32(&pf->state,
637 IXL_PF_STATE_FW_LLDP_DISABLED);
639 atomic_clear_32(&pf->state,
640 IXL_PF_STATE_FW_LLDP_DISABLED);
644 /* Tell FW to apply DCB config on link up */
645 i40e_aq_set_dcb_parameters(hw, true, NULL);
647 /* Fill out iflib parameters */
650 INIT_DBG_DEV(dev, "end");
/* --- error unwind: tear down in reverse order of setup --- */
654 ixl_shutdown_hmc(pf);
656 i40e_shutdown_adminq(hw);
658 ixl_free_pci_resources(pf);
660 mtx_lock(&pf->admin_mtx);
661 callout_stop(&pf->admin_timer);
662 mtx_unlock(&pf->admin_mtx);
663 mtx_destroy(&pf->admin_mtx);
/*
 * ifdi_attach_post: second-stage attach, after iflib has sized the
 * queues.  Sets up the ifnet, handles the recovery-mode short path,
 * configures the switch, filters and queue manager, enables AQ
 * interrupts, registers sysctls/stats, syncs link state, initializes
 * SR-IOV and (optionally) iWARP, then starts the admin timer.
 * NOTE(review): error labels, returns and some braces were dropped from
 * this excerpt -- the visible statements are not the complete function.
 */
668 ixl_if_attach_post(if_ctx_t ctx)
675 enum i40e_status_code status;
677 dev = iflib_get_dev(ctx);
678 pf = iflib_get_softc(ctx);
680 INIT_DBG_DEV(dev, "begin");
683 vsi->ifp = iflib_get_ifp(ctx);
686 /* Save off determined number of queues for interface */
687 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
688 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
690 /* Setup OS network interface / ifnet */
691 if (ixl_setup_interface(dev, pf)) {
692 device_printf(dev, "interface setup failed!\n");
/* Recovery mode: minimal bring-up (intr0 + sysctls + timer), then done. */
697 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
698 /* Keep admin queue interrupts active while driver is loaded */
699 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
700 ixl_configure_intr0_msix(pf);
701 ixl_enable_intr0(hw);
704 ixl_add_sysctls_recovery_mode(pf);
706 /* Start the admin timer */
707 mtx_lock(&pf->admin_mtx);
708 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
709 mtx_unlock(&pf->admin_mtx);
713 /* Determine link state */
714 if (ixl_attach_get_link_status(pf)) {
719 error = ixl_switch_config(pf);
721 device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
726 /* Add protocol filters to list */
727 ixl_init_filters(vsi);
729 /* Init queue allocation manager */
730 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
732 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
736 /* reserve a contiguous allocation for the PF's VSI */
737 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
738 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
740 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
744 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
745 pf->qtag.num_allocated, pf->qtag.num_active);
747 /* Limit PHY interrupts to link, autoneg, and modules failure */
748 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
751 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
752 " aq_err %s\n", i40e_stat_str(hw, status),
753 i40e_aq_str(hw, hw->aq.asq_last_status));
757 /* Get the bus configuration and set the shared code */
758 ixl_get_bus_info(pf);
760 /* Keep admin queue interrupts active while driver is loaded */
761 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
762 ixl_configure_intr0_msix(pf);
763 ixl_enable_intr0(hw);
766 /* Set initial advertised speed sysctl value */
767 ixl_set_initial_advertised_speeds(pf);
769 /* Initialize statistics & add sysctls */
770 ixl_add_device_sysctls(pf);
771 ixl_pf_reset_stats(pf);
772 ixl_update_stats_counters(pf);
773 ixl_add_hw_stats(pf);
776 * Driver may have been reloaded. Ensure that the link state
777 * is consistent with current settings.
779 ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);
781 hw->phy.get_link_info = true;
782 i40e_get_link_status(hw, &pf->link_up);
783 ixl_update_link_status(pf);
786 ixl_initialize_sriov(pf);
/* iWARP client attach -- only if HW supports it and the tunable is set. */
790 if (hw->func_caps.iwarp && ixl_enable_iwarp) {
791 pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
792 if (pf->iw_enabled) {
793 error = ixl_iw_pf_attach(pf);
796 "interfacing to iWARP driver failed: %d\n",
800 device_printf(dev, "iWARP ready\n");
802 device_printf(dev, "iWARP disabled on this device "
803 "(no MSI-X vectors)\n");
805 pf->iw_enabled = false;
806 device_printf(dev, "The device is not iWARP enabled\n");
809 /* Start the admin timer */
810 mtx_lock(&pf->admin_mtx);
811 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
812 mtx_unlock(&pf->admin_mtx);
814 INIT_DBG_DEV(dev, "end");
818 INIT_DEBUGOUT("end: error %d", error);
819 /* ixl_if_detach() is called on error from this */
824 * XXX: iflib always ignores the return value of detach()
825 * -> This means that this isn't allowed to fail
/*
 * ifdi_detach: tears down in reverse-attach order -- stop/destroy the
 * admin timer and its mutex, detach iWARP (bailing out if it is busy),
 * remove media, shut down HMC and the admin queue, then release the
 * queue manager, PCI resources and filter list.
 * NOTE(review): some lines (returns, closing braces) are not visible in
 * this excerpt.
 */
828 ixl_if_detach(if_ctx_t ctx)
830 struct ixl_pf *pf = iflib_get_softc(ctx);
831 struct ixl_vsi *vsi = &pf->vsi;
832 struct i40e_hw *hw = &pf->hw;
833 device_t dev = pf->dev;
834 enum i40e_status_code status;
839 INIT_DBG_DEV(dev, "begin");
841 /* Stop the admin timer */
842 mtx_lock(&pf->admin_mtx);
843 callout_stop(&pf->admin_timer);
844 mtx_unlock(&pf->admin_mtx);
845 mtx_destroy(&pf->admin_mtx);
848 if (ixl_enable_iwarp && pf->iw_enabled) {
849 error = ixl_iw_pf_detach(pf);
850 if (error == EBUSY) {
851 device_printf(dev, "iwarp in use; stop it first.\n");
856 /* Remove all previously allocated media types */
857 ifmedia_removeall(vsi->media);
859 /* Shutdown LAN HMC */
860 ixl_shutdown_hmc(pf);
862 /* Shutdown admin queue */
863 ixl_disable_intr0(hw);
864 status = i40e_shutdown_adminq(hw);
867 "i40e_shutdown_adminq() failed with status %s\n",
868 i40e_stat_str(hw, status));
870 ixl_pf_qmgr_destroy(&pf->qmgr);
871 ixl_free_pci_resources(pf);
872 ixl_free_filters(&vsi->ftl);
873 INIT_DBG_DEV(dev, "end");
/* ifdi_shutdown: currently a stub -- stop/low-power handling is a TODO. */
878 ixl_if_shutdown(if_ctx_t ctx)
882 INIT_DEBUGOUT("ixl_if_shutdown: begin");
884 /* TODO: Call ixl_if_stop()? */
886 /* TODO: Then setup low power mode */
/* ifdi_suspend: currently a stub -- stop/low-power handling is a TODO. */
892 ixl_if_suspend(if_ctx_t ctx)
896 INIT_DEBUGOUT("ixl_if_suspend: begin");
898 /* TODO: Call ixl_if_stop()? */
900 /* TODO: Then setup low power mode */
/*
 * ifdi_resume: after a D3->D0 transition, re-initialize if the interface
 * was administratively up.  NOTE(review): the statement executed when
 * IFF_UP is set is not visible in this excerpt.
 */
906 ixl_if_resume(if_ctx_t ctx)
908 struct ifnet *ifp = iflib_get_ifp(ctx);
910 INIT_DEBUGOUT("ixl_if_resume: begin");
912 /* Read & clear wake-up registers */
914 /* Required after D3->D0 transition */
915 if (ifp->if_flags & IFF_UP)
/*
 * ifdi_init: (re)initialize the interface.  Skipped in recovery mode.
 * Rebuilds HW structures if the admin queue died, propagates a
 * locally-administered MAC address, initializes the VSI rings, brings
 * link up, programs interrupts/ITR, enables rings, restores filters and
 * promiscuous state, and restarts iWARP if enabled.
 * NOTE(review): lines (returns, else branches, braces) were dropped from
 * this excerpt -- the visible statements are not the complete function.
 */
922 ixl_if_init(if_ctx_t ctx)
924 struct ixl_pf *pf = iflib_get_softc(ctx);
925 struct ixl_vsi *vsi = &pf->vsi;
926 struct i40e_hw *hw = &pf->hw;
927 struct ifnet *ifp = iflib_get_ifp(ctx);
928 device_t dev = iflib_get_dev(ctx);
929 u8 tmpaddr[ETHER_ADDR_LEN];
932 if (IXL_PF_IN_RECOVERY_MODE(pf))
935 * If the aq is dead here, it probably means something outside of the driver
936 * did something to the adapter, like a PF reset.
937 * So, rebuild the driver's state here if that occurs.
939 if (!i40e_check_asq_alive(&pf->hw)) {
940 device_printf(dev, "Admin Queue is down; resetting...\n");
941 ixl_teardown_hw_structs(pf);
942 ixl_rebuild_hw_structs_after_reset(pf, false);
945 /* Get the latest mac address... User might use a LAA */
946 bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
947 if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
948 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
949 ixl_del_all_vlan_filters(vsi, hw->mac.addr);
950 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
951 ret = i40e_aq_mac_address_write(hw,
952 I40E_AQC_WRITE_TYPE_LAA_ONLY,
955 device_printf(dev, "LLA address change failed!!\n");
959 * New filters are configured by ixl_reconfigure_filters
960 * at the end of ixl_init_locked.
964 iflib_set_mac(ctx, hw->mac.addr);
966 /* Prepare the VSI: rings, hmc contexts, etc... */
967 if (ixl_initialize_vsi(vsi)) {
968 device_printf(dev, "initialize vsi failed!!\n");
972 ixl_set_link(pf, true);
974 /* Reconfigure multicast filters in HW */
975 ixl_if_multi_set(ctx);
980 /* Set up MSI-X routing and the ITR settings */
981 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
982 ixl_configure_queue_intr_msix(pf);
983 ixl_configure_itr(pf);
985 ixl_configure_legacy(pf);
987 if (vsi->enable_head_writeback)
988 ixl_init_tx_cidx(vsi);
990 ixl_init_tx_rsqs(vsi);
992 ixl_enable_rings(vsi);
994 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
996 /* Re-add configure filters to HW */
997 ixl_reconfigure_filters(vsi);
999 /* Configure promiscuous mode */
1000 ixl_if_promisc_set(ctx, if_getflags(ifp));
1003 if (ixl_enable_iwarp && pf->iw_enabled) {
1004 ret = ixl_iw_pf_init(pf);
1007 "initialize iwarp failed, code %d\n", ret);
/*
 * ifdi_stop: quiesce the interface.  No-op in recovery mode; otherwise
 * stops iWARP, disables ring interrupts and rings, and drops link --
 * but only when the interface is actually going down (not on a
 * reconfigure such as an MTU change) and link-on-down is not requested.
 * NOTE(review): some lines are missing from this excerpt.
 */
1013 ixl_if_stop(if_ctx_t ctx)
1015 struct ixl_pf *pf = iflib_get_softc(ctx);
1016 struct ifnet *ifp = iflib_get_ifp(ctx);
1017 struct ixl_vsi *vsi = &pf->vsi;
1019 INIT_DEBUGOUT("ixl_if_stop: begin\n");
1021 if (IXL_PF_IN_RECOVERY_MODE(pf))
1024 // TODO: This may need to be reworked
1026 /* Stop iWARP device */
1027 if (ixl_enable_iwarp && pf->iw_enabled)
1031 ixl_disable_rings_intr(vsi);
1032 ixl_disable_rings(pf, vsi, &pf->qtag);
1035 * Don't set link state if only reconfiguring
1036 * e.g. on MTU change.
1038 if ((if_getflags(ifp) & IFF_UP) == 0 &&
1039 (atomic_load_acq_32(&pf->state) &
1040 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
1041 ixl_set_link(pf, false);
/*
 * ifdi_msix_intr_assign: vector 0 goes to the admin queue, vectors 1..N
 * to the RX queues; TX queues get software interrupts piggybacked onto
 * the RX vectors round-robin (i % nrxqsets).  The tail lines (L656-L659)
 * are the failure unwind that frees the already-allocated IRQs.
 * NOTE(review): lines (rid assignments, error checks, returns) were
 * dropped from this excerpt -- the visible statements are incomplete.
 */
1045 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
1047 struct ixl_pf *pf = iflib_get_softc(ctx);
1048 struct ixl_vsi *vsi = &pf->vsi;
1049 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1050 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1051 int err, i, rid, vector = 0;
1054 MPASS(vsi->shared->isc_nrxqsets > 0);
1055 MPASS(vsi->shared->isc_ntxqsets > 0);
1057 /* Admin Que must use vector 0*/
1059 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
1060 ixl_msix_adminq, pf, 0, "aq");
1062 iflib_irq_free(ctx, &vsi->irq);
1063 device_printf(iflib_get_dev(ctx),
1064 "Failed to register Admin Que handler");
1067 /* Create soft IRQ for handling VFLRs */
1068 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
1070 /* Now set up the stations */
1071 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
1074 snprintf(buf, sizeof(buf), "rxq%d", i);
1075 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1076 IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
1077 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
1078 * what's expected in the iflib context? */
1080 device_printf(iflib_get_dev(ctx),
1081 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
1082 vsi->num_rx_queues = i + 1;
1085 rx_que->msix = vector;
1088 bzero(buf, sizeof(buf));
1090 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
1091 snprintf(buf, sizeof(buf), "txq%d", i);
1092 iflib_softirq_alloc_generic(ctx,
1093 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
1094 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1096 /* TODO: Maybe call a strategy function for this to figure out which
1097 * interrupts to map Tx queues to. I don't know if there's an immediately
1098 * better way than this other than a user-supplied map, though. */
1099 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* --- failure unwind: release admin IRQ and any allocated RX queue IRQs --- */
1104 iflib_irq_free(ctx, &vsi->irq);
1105 rx_que = vsi->rx_queues;
1106 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1107 iflib_irq_free(ctx, &rx_que->que_irq);
1112 * Enable all interrupts
1115 * iflib_init_locked, after ixl_if_init()
1118 ixl_if_enable_intr(if_ctx_t ctx)
1120 struct ixl_pf *pf = iflib_get_softc(ctx);
1121 struct ixl_vsi *vsi = &pf->vsi;
1122 struct i40e_hw *hw = vsi->hw;
1123 struct ixl_rx_queue *que = vsi->rx_queues;
/* First re-arm the "other causes" / Admin Queue interrupt (vector 0). */
1125 ixl_enable_intr0(hw);
1126 /* Enable queue interrupts */
1127 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1128 /* TODO: Queue index parameter is probably wrong */
/* NOTE(review): uses rxr.me here while ixl_if_disable_intr() uses
 * msix - 1 for the same purpose — one of the two indexings is likely
 * off; the TODO above flags the same concern.  Confirm against the
 * PFINT_DYN_CTLN register layout before changing either. */
1129 ixl_enable_queue(hw, que->rxr.me);
1133 * Disable queue interrupts
1135 * Other interrupt causes need to remain active.
1138 ixl_if_disable_intr(if_ctx_t ctx)
1140 struct ixl_pf *pf = iflib_get_softc(ctx);
1141 struct ixl_vsi *vsi = &pf->vsi;
1142 struct i40e_hw *hw = vsi->hw;
1143 struct ixl_rx_queue *rx_que = vsi->rx_queues;
/* With MSI-X, mask each RX queue's own vector individually. */
1145 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1146 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1147 ixl_disable_queue(hw, rx_que->msix - 1);
/* Non-MSI-X (single vector) fallback: detach the queue list from the
 * interrupt linked list instead of masking per-queue vectors. */
1149 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1150 // stops queues from triggering interrupts
1151 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/*
 * ixl_if_rx_queue_intr_enable - iflib callback: re-arm the interrupt for
 * one RX queue after its ISR/task has finished processing.
 * The queue's MSI-X vector number is converted to the hardware queue
 * interrupt index by subtracting 1 (vector 0 is the Admin Queue).
 */
1156 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1158 struct ixl_pf *pf = iflib_get_softc(ctx);
1159 struct ixl_vsi *vsi = &pf->vsi;
1160 struct i40e_hw *hw = vsi->hw;
1161 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
1163 ixl_enable_queue(hw, rx_que->msix - 1);
/*
 * ixl_if_tx_queue_intr_enable - iflib callback: re-arm the interrupt
 * associated with one TX queue.  TX queues share RX queue vectors (see
 * ixl_if_msix_intr_assign), so tx_que->msix names the shared vector;
 * subtracting 1 maps it to the hardware queue interrupt index.
 */
1168 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1170 struct ixl_pf *pf = iflib_get_softc(ctx);
1171 struct ixl_vsi *vsi = &pf->vsi;
1172 struct i40e_hw *hw = vsi->hw;
1173 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1175 ixl_enable_queue(hw, tx_que->msix - 1);
/*
 * ixl_if_tx_queues_alloc - iflib callback: allocate per-TX-queue software
 * state and bind each queue to its iflib-provided descriptor ring.
 *
 * vaddrs/paddrs hold the virtual/physical DMA addresses of the descriptor
 * rings, ntxqs rings per queue set.  When head writeback is disabled, a
 * "report status" array (tx_rsq) of isc_ntxd[0] entries is also allocated
 * per queue and initialized to QIDX_INVALID.  On failure all partially
 * allocated state is torn down through ixl_if_queues_free().
 */
1180 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1182 struct ixl_pf *pf = iflib_get_softc(ctx);
1183 struct ixl_vsi *vsi = &pf->vsi;
1184 if_softc_ctx_t scctx = vsi->shared;
1185 struct ixl_tx_queue *que;
1186 int i, j, error = 0;
1188 MPASS(scctx->isc_ntxqsets > 0);
1190 MPASS(scctx->isc_ntxqsets == ntxqsets);
1192 /* Allocate queue structure memory */
1193 if (!(vsi->tx_queues =
1194 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1195 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1199 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1200 struct tx_ring *txr = &que->txr;
1205 if (!vsi->enable_head_writeback) {
1206 /* Allocate report status array */
1207 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1208 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1212 /* Init report status array */
1213 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1214 txr->tx_rsq[j] = QIDX_INVALID;
1216 /* get the virtual and physical address of the hardware queues */
1217 txr->tail = I40E_QTX_TAIL(txr->me)
1218 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1219 txr->tx_paddr = paddrs[i * ntxqs];
/* Error path: release everything allocated above. */
1225 ixl_if_queues_free(ctx);
/*
 * ixl_if_rx_queues_alloc - iflib callback: allocate per-RX-queue software
 * state and bind each queue to its iflib-provided descriptor ring.
 *
 * Mirrors ixl_if_tx_queues_alloc(): vaddrs/paddrs carry the DMA ring
 * addresses (nrxqs rings per queue set).  On failure the partially
 * allocated state is torn down through ixl_if_queues_free().
 */
1230 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1232 struct ixl_pf *pf = iflib_get_softc(ctx);
1233 struct ixl_vsi *vsi = &pf->vsi;
1234 struct ixl_rx_queue *que;
1238 if_softc_ctx_t scctx = vsi->shared;
1239 MPASS(scctx->isc_nrxqsets > 0);
1241 MPASS(scctx->isc_nrxqsets == nrxqsets);
1244 /* Allocate queue structure memory */
1245 if (!(vsi->rx_queues =
1246 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1247 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1248 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1253 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1254 struct rx_ring *rxr = &que->rxr;
1259 /* get the virtual and physical address of the hardware queues */
1260 rxr->tail = I40E_QRX_TAIL(rxr->me);
1261 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1262 rxr->rx_paddr = paddrs[i * nrxqs];
/* Error path: release everything allocated above. */
1268 ixl_if_queues_free(ctx);
/*
 * ixl_if_queues_free - iflib callback: free all per-queue software state.
 *
 * Frees each TX queue's report-status array (only allocated when head
 * writeback is disabled), then the TX and RX queue arrays themselves,
 * NULLing the pointers so a repeat call is harmless.  The VSI sysctl
 * context is also torn down unless the PF is in recovery mode.
 * Safe to call on partially-constructed state (used as the error-unwind
 * path of the queue-alloc callbacks).
 */
1273 ixl_if_queues_free(if_ctx_t ctx)
1275 struct ixl_pf *pf = iflib_get_softc(ctx);
1276 struct ixl_vsi *vsi = &pf->vsi;
1278 if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1279 struct ixl_tx_queue *que;
1282 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1283 struct tx_ring *txr = &que->txr;
1284 if (txr->tx_rsq != NULL) {
1285 free(txr->tx_rsq, M_IXL);
1291 if (vsi->tx_queues != NULL) {
1292 free(vsi->tx_queues, M_IXL);
1293 vsi->tx_queues = NULL;
1295 if (vsi->rx_queues != NULL) {
1296 free(vsi->rx_queues, M_IXL);
1297 vsi->rx_queues = NULL;
1300 if (!IXL_PF_IN_RECOVERY_MODE(pf))
1301 sysctl_ctx_free(&vsi->sysctl_ctx);
/*
 * ixl_update_link_status - propagate the current PHY link state to iflib
 * and to the VFs.
 *
 * Transitions are edge-triggered on vsi->link_active: on an inactive->
 * active edge the link speed is converted to a baudrate, iflib is told
 * LINK_STATE_UP, and the new state is broadcast to VFs; on the opposite
 * edge iflib is told LINK_STATE_DOWN (baudrate 0) and VFs are notified.
 * No-ops when the cached state already matches.
 */
1305 ixl_update_link_status(struct ixl_pf *pf)
1307 struct ixl_vsi *vsi = &pf->vsi;
1308 struct i40e_hw *hw = &pf->hw;
1312 if (vsi->link_active == FALSE) {
1313 vsi->link_active = TRUE;
1314 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1315 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1316 ixl_link_up_msg(pf);
1318 ixl_broadcast_link_state(pf);
1321 } else { /* Link down */
1322 if (vsi->link_active == TRUE) {
1323 vsi->link_active = FALSE;
1324 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1326 ixl_broadcast_link_state(pf);
/*
 * ixl_handle_lan_overflow_event - log a LAN queue overflow Admin Queue
 * event.  Decodes the global RX queue index from param0 and the raw
 * QTX_CTL register value from param1, and prints both.  Purely
 * diagnostic; no corrective action is taken here (overflow should only
 * occur on no-drop queues, which this driver does not configure).
 */
1333 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1335 device_t dev = pf->dev;
1336 u32 rxq_idx, qtx_ctl;
1338 rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1339 I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1340 qtx_ctl = e->desc.params.external.param1;
1342 device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1343 device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
/*
 * ixl_process_adminq - drain and dispatch pending Admin Queue events.
 *
 * Allocates a temporary event buffer, then loops on
 * i40e_clean_arq_element() dispatching each event by opcode: link status
 * changes, VF mailbox messages, and LAN overflow events.  The loop is
 * bounded by IXL_ADM_LIMIT to keep the admin task from monopolizing the
 * CPU; *pending reports whether events remain.  Before returning, the
 * ADMINQ cause is re-enabled in PFINT_ICR0_ENA so further AQ interrupts
 * can fire.
 */
1347 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1349 enum i40e_status_code status = I40E_SUCCESS;
1350 struct i40e_arq_event_info event;
1351 struct i40e_hw *hw = &pf->hw;
1352 device_t dev = pf->dev;
1356 event.buf_len = IXL_AQ_BUF_SZ;
1357 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1358 if (!event.msg_buf) {
1359 device_printf(dev, "%s: Unable to allocate memory for Admin"
1360 " Queue event!\n", __func__);
1364 /* clean and process any events */
1366 status = i40e_clean_arq_element(hw, &event, pending);
1369 opcode = LE16_TO_CPU(event.desc.opcode);
1370 ixl_dbg(pf, IXL_DBG_AQ,
1371 "Admin Queue event: %#06x\n", opcode);
1373 case i40e_aqc_opc_get_link_status:
1374 ixl_link_event(pf, &event);
1376 case i40e_aqc_opc_send_msg_to_pf:
/* VF -> PF mailbox message (SR-IOV). */
1378 ixl_handle_vf_msg(pf, &event);
1382 * This should only occur on no-drop queues, which
1383 * aren't currently configured.
1385 case i40e_aqc_opc_event_lan_overflow:
1386 ixl_handle_lan_overflow_event(pf, &event);
1391 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1393 free(event.msg_buf, M_IXL);
1395 /* Re-enable admin queue interrupt cause */
1396 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1397 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1398 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_if_update_admin_status - iflib admin task: service reset/MDD state,
 * drain the Admin Queue, and refresh link status.
 *
 * Runs EMP reset handling first if a reset is in progress, then bails out
 * if the AQ did not come back alive (e.g. a bad firmware update).  MDD
 * (malicious driver detection) events are handled before the AQ is
 * drained via ixl_process_adminq().  If events remain pending the task
 * reschedules itself; otherwise interrupt cause 0 is re-enabled.
 */
1404 ixl_if_update_admin_status(if_ctx_t ctx)
1406 struct ixl_pf *pf = iflib_get_softc(ctx);
1407 struct i40e_hw *hw = &pf->hw;
1410 if (IXL_PF_IS_RESETTING(pf))
1411 ixl_handle_empr_reset(pf);
1414 * Admin Queue is shut down while handling reset.
1415 * Don't proceed if it hasn't been re-initialized
1416 * e.g due to an issue with new FW.
1418 if (!i40e_check_asq_alive(&pf->hw))
1421 if (pf->state & IXL_PF_STATE_MDD_PENDING)
1422 ixl_handle_mdd_event(pf);
1424 ixl_process_adminq(pf, &pending);
1425 ixl_update_link_status(pf);
1428 * If there are still messages to process, reschedule ourselves.
1429 * Otherwise, re-enable our interrupt and go to sleep.
1432 iflib_admin_intr_deferred(ctx);
1434 ixl_enable_intr0(hw);
/*
 * ixl_if_multi_set - iflib callback: sync hardware multicast filters with
 * the interface's multicast address list.
 *
 * Stale filters are deleted first; if the address count reaches the
 * MAX_MULTICAST_ADDR filter limit the VSI is switched to multicast
 * promiscuous mode instead and all individual multicast filters are
 * dropped.
 */
1438 ixl_if_multi_set(if_ctx_t ctx)
1440 struct ixl_pf *pf = iflib_get_softc(ctx);
1441 struct ixl_vsi *vsi = &pf->vsi;
1442 struct i40e_hw *hw = vsi->hw;
1445 IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1447 /* Delete filters for removed multicast addresses */
1448 ixl_del_multi(vsi, false);
1450 mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1451 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
/* Too many groups for HW filtering: fall back to mcast promiscuous. */
1452 i40e_aq_set_vsi_multicast_promiscuous(hw,
1453 vsi->seid, TRUE, NULL);
1454 ixl_del_multi(vsi, true);
1459 IOCTL_DEBUGOUT("ixl_if_multi_set: end");
/*
 * ixl_if_mtu_set - iflib callback: validate and record a new MTU.
 *
 * Rejects MTUs whose resulting frame (MTU + Ethernet header + CRC +
 * VLAN tag) would exceed IXL_MAX_FRAME; otherwise stores the new max
 * frame size in the shared softc context for iflib to apply.
 */
1463 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1465 struct ixl_pf *pf = iflib_get_softc(ctx);
1466 struct ixl_vsi *vsi = &pf->vsi;
1468 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1469 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1470 ETHER_VLAN_ENCAP_LEN)
1473 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1474 ETHER_VLAN_ENCAP_LEN;
/*
 * ixl_if_media_status - iflib callback: report current media for
 * SIOCGIFMEDIA.
 *
 * Marks the media valid and, when link is up, active and full-duplex
 * (the hardware has no half-duplex mode).  The firmware-reported PHY
 * type is then translated to the closest ifmedia IFM_* subtype; PHY
 * types the driver doesn't recognize map to IFM_UNKNOWN.  Negotiated
 * TX/RX pause is reported via the IFM_ETH_*PAUSE flags.
 */
1480 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1482 struct ixl_pf *pf = iflib_get_softc(ctx);
1483 struct i40e_hw *hw = &pf->hw;
1485 INIT_DEBUGOUT("ixl_media_status: begin");
1487 ifmr->ifm_status = IFM_AVALID;
1488 ifmr->ifm_active = IFM_ETHER;
1494 ifmr->ifm_status |= IFM_ACTIVE;
1495 /* Hardware is always full-duplex */
1496 ifmr->ifm_active |= IFM_FDX;
/* Map the firmware PHY type to an ifmedia subtype. */
1498 switch (hw->phy.link_info.phy_type) {
1500 case I40E_PHY_TYPE_100BASE_TX:
1501 ifmr->ifm_active |= IFM_100_TX;
1504 case I40E_PHY_TYPE_1000BASE_T:
1505 ifmr->ifm_active |= IFM_1000_T;
1507 case I40E_PHY_TYPE_1000BASE_SX:
1508 ifmr->ifm_active |= IFM_1000_SX;
1510 case I40E_PHY_TYPE_1000BASE_LX:
1511 ifmr->ifm_active |= IFM_1000_LX;
1513 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1514 ifmr->ifm_active |= IFM_1000_T;
1517 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1518 ifmr->ifm_active |= IFM_2500_T;
1521 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1522 ifmr->ifm_active |= IFM_5000_T;
1525 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1526 ifmr->ifm_active |= IFM_10G_TWINAX;
1528 case I40E_PHY_TYPE_10GBASE_SR:
1529 ifmr->ifm_active |= IFM_10G_SR;
1531 case I40E_PHY_TYPE_10GBASE_LR:
1532 ifmr->ifm_active |= IFM_10G_LR;
1534 case I40E_PHY_TYPE_10GBASE_T:
1535 ifmr->ifm_active |= IFM_10G_T;
1537 case I40E_PHY_TYPE_XAUI:
1538 case I40E_PHY_TYPE_XFI:
1539 ifmr->ifm_active |= IFM_10G_TWINAX;
1541 case I40E_PHY_TYPE_10GBASE_AOC:
1542 ifmr->ifm_active |= IFM_10G_AOC;
1545 case I40E_PHY_TYPE_25GBASE_KR:
1546 ifmr->ifm_active |= IFM_25G_KR;
1548 case I40E_PHY_TYPE_25GBASE_CR:
1549 ifmr->ifm_active |= IFM_25G_CR;
1551 case I40E_PHY_TYPE_25GBASE_SR:
1552 ifmr->ifm_active |= IFM_25G_SR;
1554 case I40E_PHY_TYPE_25GBASE_LR:
1555 ifmr->ifm_active |= IFM_25G_LR;
1557 case I40E_PHY_TYPE_25GBASE_AOC:
1558 ifmr->ifm_active |= IFM_25G_AOC;
1560 case I40E_PHY_TYPE_25GBASE_ACC:
1561 ifmr->ifm_active |= IFM_25G_ACC;
1564 case I40E_PHY_TYPE_40GBASE_CR4:
1565 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1566 ifmr->ifm_active |= IFM_40G_CR4;
1568 case I40E_PHY_TYPE_40GBASE_SR4:
1569 ifmr->ifm_active |= IFM_40G_SR4;
1571 case I40E_PHY_TYPE_40GBASE_LR4:
1572 ifmr->ifm_active |= IFM_40G_LR4;
1574 case I40E_PHY_TYPE_XLAUI:
1575 ifmr->ifm_active |= IFM_OTHER;
1577 case I40E_PHY_TYPE_1000BASE_KX:
1578 ifmr->ifm_active |= IFM_1000_KX;
1580 case I40E_PHY_TYPE_SGMII:
1581 ifmr->ifm_active |= IFM_1000_SGMII;
1583 /* ERJ: What's the difference between these? */
1584 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1585 case I40E_PHY_TYPE_10GBASE_CR1:
1586 ifmr->ifm_active |= IFM_10G_CR1;
1588 case I40E_PHY_TYPE_10GBASE_KX4:
1589 ifmr->ifm_active |= IFM_10G_KX4;
1591 case I40E_PHY_TYPE_10GBASE_KR:
1592 ifmr->ifm_active |= IFM_10G_KR;
1594 case I40E_PHY_TYPE_SFI:
1595 ifmr->ifm_active |= IFM_10G_SFI;
1597 /* Our single 20G media type */
1598 case I40E_PHY_TYPE_20GBASE_KR2:
1599 ifmr->ifm_active |= IFM_20G_KR2;
1601 case I40E_PHY_TYPE_40GBASE_KR4:
1602 ifmr->ifm_active |= IFM_40G_KR4;
1604 case I40E_PHY_TYPE_XLPPI:
1605 case I40E_PHY_TYPE_40GBASE_AOC:
1606 ifmr->ifm_active |= IFM_40G_XLPPI;
1608 /* Unknown to driver */
1610 ifmr->ifm_active |= IFM_UNKNOWN;
1613 /* Report flow control status as well */
1614 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1615 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1616 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1617 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * ixl_if_media_change - iflib callback for SIOCSIFMEDIA.
 *
 * Manual media selection is not supported by this driver: the request is
 * validated to be Ethernet and then rejected with a console message.
 */
1621 ixl_if_media_change(if_ctx_t ctx)
1623 struct ifmedia *ifm = iflib_get_media(ctx);
1625 INIT_DEBUGOUT("ixl_media_change: begin");
1627 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1630 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * ixl_if_promisc_set - iflib callback: program unicast/multicast
 * promiscuous mode on the VSI.
 *
 * IFF_PROMISC enables both; IFF_ALLMULTI (or a multicast address count
 * at/over the HW filter limit) enables multicast promiscuous only.  Both
 * settings are pushed to firmware via the corresponding AQ commands.
 */
1635 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1637 struct ixl_pf *pf = iflib_get_softc(ctx);
1638 struct ixl_vsi *vsi = &pf->vsi;
1639 struct ifnet *ifp = iflib_get_ifp(ctx);
1640 struct i40e_hw *hw = vsi->hw;
1642 bool uni = FALSE, multi = FALSE;
1644 if (flags & IFF_PROMISC)
1646 else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1650 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1651 vsi->seid, uni, NULL, true);
1654 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1655 vsi->seid, multi, NULL);
/*
 * ixl_if_timer - iflib periodic timer callback: refresh the driver's
 * statistics counters.  @qid selects the queue tick; stats are only
 * updated once per interval (the visible body updates PF-wide counters).
 */
1660 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1662 struct ixl_pf *pf = iflib_get_softc(ctx);
1667 ixl_update_stats_counters(pf);
/*
 * ixl_if_vlan_register - iflib callback: a VLAN id was configured on the
 * interface.
 *
 * The id is always recorded in vlans_map so filters can be rebuilt when
 * VLAN_HWFILTER is toggled.  If HW filtering is enabled and filter slots
 * remain, a MAC+VLAN filter is installed; when the IXL_MAX_VLAN_FILTERS
 * limit is hit, per-VLAN filtering is abandoned: all VLAN filters are
 * removed and a single MAC+IXL_VLAN_ANY filter accepts all tagged
 * traffic instead.
 */
1671 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1673 struct ixl_pf *pf = iflib_get_softc(ctx);
1674 struct ixl_vsi *vsi = &pf->vsi;
1675 struct i40e_hw *hw = vsi->hw;
1676 if_t ifp = iflib_get_ifp(ctx);
1678 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1682 * Keep track of registered VLANS to know what
1683 * filters have to be configured when VLAN_HWFILTER
1684 * capability is enabled.
1687 bit_set(vsi->vlans_map, vtag);
1689 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1692 if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1693 ixl_add_filter(vsi, hw->mac.addr, vtag);
1694 else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1696 * There is not enough HW resources to add filters
1697 * for all registered VLANs. Re-configure filtering
1698 * to allow reception of all expected traffic.
1700 device_printf(vsi->dev,
1701 "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
1702 ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1703 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/*
 * ixl_if_vlan_unregister - iflib callback: a VLAN id was removed from the
 * interface.
 *
 * Inverse of ixl_if_vlan_register(): the id is cleared from vlans_map,
 * and when HW filtering is active the matching MAC+VLAN filter is
 * removed.  If the driver had previously fallen back to the catch-all
 * IXL_VLAN_ANY filter (filter count at the limit), this removal frees a
 * slot, so the catch-all is dropped and per-VLAN filters are rebuilt.
 */
1708 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1710 struct ixl_pf *pf = iflib_get_softc(ctx);
1711 struct ixl_vsi *vsi = &pf->vsi;
1712 struct i40e_hw *hw = vsi->hw;
1713 if_t ifp = iflib_get_ifp(ctx);
1715 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1719 bit_clear(vsi->vlans_map, vtag);
1721 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1724 if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1725 ixl_del_filter(vsi, hw->mac.addr, vtag);
1726 else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1727 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1728 ixl_add_vlan_filters(vsi, hw->mac.addr);
/*
 * ixl_if_get_counter - iflib callback: return one interface statistic.
 *
 * Maps each ift_counter enum to the corresponding cached VSI counter;
 * anything not tracked by the driver falls through to
 * if_get_counter_default().  Collisions are reported as zero by the
 * default path since they cannot occur on this full-duplex hardware.
 */
1733 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1735 struct ixl_pf *pf = iflib_get_softc(ctx);
1736 struct ixl_vsi *vsi = &pf->vsi;
1737 if_t ifp = iflib_get_ifp(ctx);
1740 case IFCOUNTER_IPACKETS:
1741 return (vsi->ipackets);
1742 case IFCOUNTER_IERRORS:
1743 return (vsi->ierrors);
1744 case IFCOUNTER_OPACKETS:
1745 return (vsi->opackets);
1746 case IFCOUNTER_OERRORS:
1747 return (vsi->oerrors);
1748 case IFCOUNTER_COLLISIONS:
1749 /* Collisions are by standard impossible in 40G/10G Ethernet */
1751 case IFCOUNTER_IBYTES:
1752 return (vsi->ibytes);
1753 case IFCOUNTER_OBYTES:
1754 return (vsi->obytes);
1755 case IFCOUNTER_IMCASTS:
1756 return (vsi->imcasts);
1757 case IFCOUNTER_OMCASTS:
1758 return (vsi->omcasts);
1759 case IFCOUNTER_IQDROPS:
1760 return (vsi->iqdrops);
1761 case IFCOUNTER_OQDROPS:
1762 return (vsi->oqdrops);
1763 case IFCOUNTER_NOPROTO:
1764 return (vsi->noproto);
1766 return (if_get_counter_default(ifp, cnt));
/*
 * ixl_if_vflr_handle - iflib callback for VF level reset (VFLR) events:
 * delegates all processing to ixl_handle_vflr().
 */
1772 ixl_if_vflr_handle(if_ctx_t ctx)
1774 struct ixl_pf *pf = iflib_get_softc(ctx);
1776 ixl_handle_vflr(pf);
/*
 * ixl_if_i2c_req - iflib callback: service an SFP/module I2C read
 * request (e.g. for transceiver EEPROM dumps).
 *
 * Uses the per-PF read_i2c_byte method chosen at attach time (see the
 * i2c_access_method tunable); bails out if none is configured.  Bytes
 * are read one at a time from req->offset into req->data.
 */
1781 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1783 struct ixl_pf *pf = iflib_get_softc(ctx);
1785 if (pf->read_i2c_byte == NULL)
1788 for (int i = 0; i < req->len; i++)
1789 if (pf->read_i2c_byte(pf, req->offset + i,
1790 req->dev_addr, &req->data[i]))
/*
 * ixl_if_priv_ioctl - iflib callback for driver-private ioctls
 * (SIOCxDRVSPEC / SIOCGPRIVATE_0).
 *
 * iflib forwards these without privilege checks, so each supported
 * command must perform its own: the NVM update command requires
 * PRIV_DRIVER via priv_check() before ixl_handle_nvmupd_cmd() runs.
 */
1796 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1798 struct ixl_pf *pf = iflib_get_softc(ctx);
1799 struct ifdrv *ifd = (struct ifdrv *)data;
1803 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1804 * performing privilege checks. It is important that this function
1805 * perform the necessary checks for commands which should only be
1806 * executed by privileged threads.
1812 /* NVM update command */
1813 if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1814 error = priv_check(curthread, PRIV_DRIVER);
1817 error = ixl_handle_nvmupd_cmd(pf, ifd);
1829 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1830 * @ctx: iflib context
1831 * @event: event code to check
1833 * Defaults to returning false for every event.
1835 * @returns true if iflib needs to reinit the interface, false otherwise
1838 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
/* Only VLAN configuration changes are treated specially; every other
 * event takes the documented default (false). */
1841 case IFLIB_RESTART_VLAN_CONFIG:
1848 * Sanity check and save off tunable values.
1851 ixl_save_pf_tunables(struct ixl_pf *pf)
1853 device_t dev = pf->dev;
1855 /* Save tunable information */
1857 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1860 pf->recovery_mode = ixl_debug_recovery_mode;
1862 pf->dbg_mask = ixl_core_debug_mask;
1863 pf->hw.debug_mask = ixl_shared_debug_mask;
1864 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1865 pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1867 pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1868 pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1871 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1872 pf->i2c_access_method = 0;
1874 pf->i2c_access_method = ixl_i2c_access_method;
1876 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1877 device_printf(dev, "Invalid tx_itr value of %d set!\n",
1879 device_printf(dev, "tx_itr must be between %d and %d, "
1882 device_printf(dev, "Using default value of %d instead\n",
1884 pf->tx_itr = IXL_ITR_4K;
1886 pf->tx_itr = ixl_tx_itr;
1888 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1889 device_printf(dev, "Invalid rx_itr value of %d set!\n",
1891 device_printf(dev, "rx_itr must be between %d and %d, "
1894 device_printf(dev, "Using default value of %d instead\n",
1896 pf->rx_itr = IXL_ITR_8K;
1898 pf->rx_itr = ixl_rx_itr;
1901 if (ixl_flow_control != -1) {
1902 if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1904 "Invalid flow_control value of %d set!\n",
1907 "flow_control must be between %d and %d, "
1908 "inclusive\n", 0, 3);
1910 "Using default configuration instead\n");
1912 pf->fc = ixl_flow_control;