1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
40 #include "ixl_iw_int.h"
44 #include "ixl_pf_iov.h"
47 /*********************************************************************
49 *********************************************************************/
/*
 * Driver version 2.3.0; IXL_DRIVER_VERSION_STRING stringifies it as
 * "2.3.0-k" and is exported to iflib via isc_driver_version below.
 */
50 #define IXL_DRIVER_VERSION_MAJOR 2
51 #define IXL_DRIVER_VERSION_MINOR 3
52 #define IXL_DRIVER_VERSION_BUILD 0
54 #define IXL_DRIVER_VERSION_STRING \
55 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \
56 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \
57 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
59 /*********************************************************************
62 * Used by probe to select devices to load on
64 * ( Vendor ID, Device ID, Branding String )
65 *********************************************************************/
/*
 * PCI ID table: (vendor, device, branding string) triples consumed by
 * iflib_device_probe() / IFLIB_PNP_INFO() to match supported
 * X710/XL710/XXV710/X722/V710 adapters.
 */
67 static pci_vendor_info_t ixl_vendor_info_array[] =
69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
86 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
87 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
88 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
89 /* required last entry */
93 /*********************************************************************
95 *********************************************************************/
/*
 * Forward declarations. The ifdi_* methods below are registered in
 * ixl_if_methods[] and invoked by the iflib framework.
 */
96 /*** IFLIB interface ***/
97 static void *ixl_register(device_t dev);
98 static int ixl_if_attach_pre(if_ctx_t ctx);
99 static int ixl_if_attach_post(if_ctx_t ctx);
100 static int ixl_if_detach(if_ctx_t ctx);
101 static int ixl_if_shutdown(if_ctx_t ctx);
102 static int ixl_if_suspend(if_ctx_t ctx);
103 static int ixl_if_resume(if_ctx_t ctx);
104 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
105 static void ixl_if_enable_intr(if_ctx_t ctx);
106 static void ixl_if_disable_intr(if_ctx_t ctx);
107 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
108 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
109 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
110 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
111 static void ixl_if_queues_free(if_ctx_t ctx);
112 static void ixl_if_update_admin_status(if_ctx_t ctx);
113 static void ixl_if_multi_set(if_ctx_t ctx);
114 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
115 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
116 static int ixl_if_media_change(if_ctx_t ctx);
117 static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
118 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
119 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
120 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
121 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
122 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
123 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
124 static bool ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
126 static void ixl_if_vflr_handle(if_ctx_t ctx);
/* Internal helper routines (not part of the iflib method table) */
130 static u_int ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
131 static void ixl_save_pf_tunables(struct ixl_pf *);
132 static int ixl_allocate_pci_resources(struct ixl_pf *);
133 static void ixl_setup_ssctx(struct ixl_pf *pf);
134 static void ixl_admin_timer(void *arg);
136 /*********************************************************************
137 * FreeBSD Device Interface Entry Points
138 *********************************************************************/
/*
 * newbus device methods. Everything except device_register is delegated
 * to the generic iflib entry points, which call back into the ifdi_*
 * methods registered in ixl_if_methods[]. The pci_iov_* methods are the
 * SR-IOV entry points, likewise forwarded to iflib.
 */
140 static device_method_t ixl_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_register, ixl_register),
143 DEVMETHOD(device_probe, iflib_device_probe),
144 DEVMETHOD(device_attach, iflib_device_attach),
145 DEVMETHOD(device_detach, iflib_device_detach),
146 DEVMETHOD(device_shutdown, iflib_device_shutdown),
148 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
149 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
150 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/*
 * Driver registration and module metadata. The softc is a struct ixl_pf;
 * the module depends on pci, ether and iflib being loaded first.
 */
155 static driver_t ixl_driver = {
156 "ixl", ixl_methods, sizeof(struct ixl_pf),
159 devclass_t ixl_devclass;
160 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
161 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
162 MODULE_VERSION(ixl, 3);
164 MODULE_DEPEND(ixl, pci, 1, 1, 1);
165 MODULE_DEPEND(ixl, ether, 1, 1, 1);
166 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
/*
 * iflib interface method table: maps each ifdi_* framework hook to its
 * ixl_if_* implementation in this file (ifdi_iov_* handle SR-IOV VFs).
 */
168 static device_method_t ixl_if_methods[] = {
169 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
170 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
171 DEVMETHOD(ifdi_detach, ixl_if_detach),
172 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
173 DEVMETHOD(ifdi_suspend, ixl_if_suspend),
174 DEVMETHOD(ifdi_resume, ixl_if_resume),
175 DEVMETHOD(ifdi_init, ixl_if_init),
176 DEVMETHOD(ifdi_stop, ixl_if_stop),
177 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
178 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
179 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
180 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
181 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
182 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
183 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
184 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
185 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
186 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
187 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
188 DEVMETHOD(ifdi_media_status, ixl_if_media_status),
189 DEVMETHOD(ifdi_media_change, ixl_if_media_change),
190 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
191 DEVMETHOD(ifdi_timer, ixl_if_timer),
192 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
193 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
194 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
195 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
196 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
197 DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
199 DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
200 DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
201 DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
202 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
/* iflib-facing driver descriptor; referenced by ixl_sctx_init.isc_driver */
209 static driver_t ixl_if_driver = {
210 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
214 ** TUNEABLE PARAMETERS:
/* Root of the hw.ixl sysctl tree; all tunables below hang off of it. */
217 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
218 "ixl driver parameters");
/*
 * hw.ixl.enable_tx_fc_filter (default 1): when set, the HW drops
 * Ethertype 0x8808 (flow-control) frames originating from software.
 */
222 * Leave this on unless you need to send flow control
223 * frames (or other control frames) from software
225 static int ixl_enable_tx_fc_filter = 1;
226 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
227 &ixl_enable_tx_fc_filter);
228 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
229 &ixl_enable_tx_fc_filter, 0,
230 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
/*
 * hw.ixl.debug_recovery_mode (default 0): debug aid that makes the driver
 * behave as if the firmware had entered recovery mode, without the FW
 * actually doing so.
 */
234 static int ixl_debug_recovery_mode = 0;
235 TUNABLE_INT("hw.ixl.debug_recovery_mode",
236 &ixl_debug_recovery_mode);
237 SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
238 &ixl_debug_recovery_mode, 0,
/* Fixed typo in the user-visible sysctl description: "debuging" */
239 "Act like when FW entered recovery mode (for debugging)");
/* hw.ixl.i2c_access_method: selects how SFP module I2C is accessed. */
242 static int ixl_i2c_access_method = 0;
243 TUNABLE_INT("hw.ixl.i2c_access_method",
244 &ixl_i2c_access_method);
245 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
246 &ixl_i2c_access_method, 0,
247 IXL_SYSCTL_HELP_I2C_METHOD);
/* hw.ixl.enable_vf_loopback (default 1): see IXL_SYSCTL_HELP_VF_LOOPBACK. */
249 static int ixl_enable_vf_loopback = 1;
250 TUNABLE_INT("hw.ixl.enable_vf_loopback",
251 &ixl_enable_vf_loopback);
252 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
253 &ixl_enable_vf_loopback, 0,
254 IXL_SYSCTL_HELP_VF_LOOPBACK);
/*
 * hw.ixl.enable_head_writeback (default 1): selects the TX completion
 * detection scheme; see the ixl_txrx_hwb/ixl_txrx_dwb selection in
 * ixl_setup_ssctx().
 */
257 * Different method for processing TX descriptor
260 static int ixl_enable_head_writeback = 1;
261 TUNABLE_INT("hw.ixl.enable_head_writeback",
262 &ixl_enable_head_writeback);
263 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
264 &ixl_enable_head_writeback, 0,
265 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
/* Debug-print masks: core_debug_mask covers driver code, shared_debug_mask
 * covers the Intel shared (i40e_*) code. */
267 static int ixl_core_debug_mask = 0;
268 TUNABLE_INT("hw.ixl.core_debug_mask",
269 &ixl_core_debug_mask);
270 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
271 &ixl_core_debug_mask, 0,
272 "Display debug statements that are printed in non-shared code");
274 static int ixl_shared_debug_mask = 0;
275 TUNABLE_INT("hw.ixl.shared_debug_mask",
276 &ixl_shared_debug_mask);
277 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
278 &ixl_shared_debug_mask, 0,
279 "Display debug statements that are printed in shared code");
283 ** Controls for Interrupt Throttling
284 ** - true/false for dynamic adjustment
285 ** - default values for static ITR
287 static int ixl_dynamic_rx_itr = 0;
288 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
289 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
290 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
292 static int ixl_dynamic_tx_itr = 0;
293 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
294 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
295 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults: 8K interrupts/s for RX, 4K for TX. */
298 static int ixl_rx_itr = IXL_ITR_8K;
299 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
300 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
301 &ixl_rx_itr, 0, "RX Interrupt Rate");
303 static int ixl_tx_itr = IXL_ITR_4K;
304 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
305 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
306 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* iWARP support knobs; non-static so the ixl_iw code can reference them. */
309 int ixl_enable_iwarp = 0;
310 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
311 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
312 &ixl_enable_iwarp, 0, "iWARP enabled");
/* Older FreeBSD releases get a single iWARP MSI-X vector by default. */
314 #if __FreeBSD_version < 1100000
315 int ixl_limit_iwarp_msix = 1;
317 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
319 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
320 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
321 &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
/* TX/RX method tables: head-writeback (hwb) vs. descriptor-writeback (dwb),
 * chosen at attach time in ixl_setup_ssctx(). Defined in the txrx code. */
324 extern struct if_txrx ixl_txrx_hwb;
325 extern struct if_txrx ixl_txrx_dwb;
/*
 * Static iflib shared context: DMA alignment/size limits, descriptor ring
 * bounds, the PCI ID table and driver descriptor handed to iflib at
 * registration time (returned from ixl_register()).
 */
327 static struct if_shared_ctx ixl_sctx_init = {
328 .isc_magic = IFLIB_MAGIC,
329 .isc_q_align = PAGE_SIZE,
330 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
331 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
332 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
333 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
334 .isc_rx_maxsize = 16384,
335 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
336 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
/* One interrupt vector reserved for the admin queue (vector 0). */
341 .isc_admin_intrcnt = 1,
342 .isc_vendor_info = ixl_vendor_info_array,
343 .isc_driver_version = IXL_DRIVER_VERSION_STRING,
344 .isc_driver = &ixl_if_driver,
345 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
347 .isc_nrxd_min = {IXL_MIN_RING},
348 .isc_ntxd_min = {IXL_MIN_RING},
349 .isc_nrxd_max = {IXL_MAX_RING},
350 .isc_ntxd_max = {IXL_MAX_RING},
351 .isc_nrxd_default = {IXL_DEFAULT_RING},
352 .isc_ntxd_default = {IXL_DEFAULT_RING},
355 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
/*
 * device_register method (wired up in ixl_methods[]); declared above as
 * returning void *. NOTE(review): the body is not visible in this listing —
 * presumably it hands ixl_sctx to iflib; confirm against the full source.
 */
359 ixl_register(device_t dev)
/*
 * Map the device's PCI memory BAR and capture PCI identity/topology into
 * the shared-code hw structure. Fills in pf->osdep with the bus-space
 * tag/handle/size used for register access and points hw->hw_addr/hw->back
 * at it. Called from ixl_if_attach_pre(); errors are reported via
 * device_printf().
 */
365 ixl_allocate_pci_resources(struct ixl_pf *pf)
367 device_t dev = iflib_get_dev(pf->vsi.ctx);
368 struct i40e_hw *hw = &pf->hw;
373 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
376 if (!(pf->pci_mem)) {
377 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
381 /* Save off the PCI information */
382 hw->vendor_id = pci_get_vendor(dev);
383 hw->device_id = pci_get_device(dev);
384 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
385 hw->subsystem_vendor_id =
386 pci_read_config(dev, PCIR_SUBVEND_0, 2);
387 hw->subsystem_device_id =
388 pci_read_config(dev, PCIR_SUBDEV_0, 2);
390 hw->bus.device = pci_get_slot(dev);
391 hw->bus.func = pci_get_function(dev);
393 /* Save off register access information */
394 pf->osdep.mem_bus_space_tag =
395 rman_get_bustag(pf->pci_mem);
396 pf->osdep.mem_bus_space_handle =
397 rman_get_bushandle(pf->pci_mem);
398 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
/* GLGEN_STAT is used as the posted-write flush register by the osdep layer */
399 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* hw_addr is a pointer at the bus-space handle, not a direct mapping */
402 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
403 pf->hw.back = &pf->osdep;
/*
 * Fill out the iflib softc context: queue-set limits (1 in FW recovery
 * mode, 128 on X722, 64 otherwise), TX/RX descriptor ring sizes, the
 * TX/RX method table (head-writeback vs. descriptor-writeback), MSI-X BAR,
 * TSO/segment limits and checksum capabilities.
 */
409 ixl_setup_ssctx(struct ixl_pf *pf)
411 if_softc_ctx_t scctx = pf->vsi.shared;
412 struct i40e_hw *hw = &pf->hw;
414 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
415 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
416 scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
417 } else if (hw->mac.type == I40E_MAC_X722)
418 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
420 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
/* Head-writeback needs one extra u32 per ring for the HW-written head */
422 if (pf->vsi.enable_head_writeback) {
423 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
424 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
425 scctx->isc_txrx = &ixl_txrx_hwb;
427 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
428 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
429 scctx->isc_txrx = &ixl_txrx_dwb;
432 scctx->isc_txrx->ift_legacy_intr = ixl_intr;
/* RX rings use the 32-byte descriptor format */
433 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
434 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
435 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
436 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
437 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
438 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
439 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
440 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
441 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
442 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
/*
 * Periodic callout (armed with hz/2 in the attach paths): defers the
 * iflib admin task and then re-arms itself for another half second.
 * Runs with pf->admin_mtx held (callout_init_mtx in ixl_if_attach_pre).
 */
446 ixl_admin_timer(void *arg)
448 struct ixl_pf *pf = (struct ixl_pf *)arg;
450 /* Fire off the admin task */
451 iflib_admin_intr_deferred(pf->vsi.ctx);
453 /* Reschedule the admin timer */
454 callout_schedule(&pf->admin_timer, hz/2);
/*
 * Reduced attach path taken when the firmware is in recovery mode: warn
 * the user, fetch the MAC address, and (under MSI-X) keep only the admin
 * queue interrupt (vector 0) configured and enabled.
 */
458 ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
460 struct ixl_vsi *vsi = &pf->vsi;
461 struct i40e_hw *hw = &pf->hw;
462 device_t dev = pf->dev;
464 device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
466 i40e_get_mac_addr(hw, hw->mac.addr);
468 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
469 ixl_configure_intr0_msix(pf);
470 ixl_enable_intr0(hw);
/*
 * First-stage iflib attach: map PCI resources, reset the PF, bring up the
 * shared code and admin queue, sanity-check NVM/firmware API versions,
 * divert to the recovery-mode path if needed, then set up HMC, LLDP/EEE
 * state, MAC addresses, filter control and DCB before filling out the
 * iflib parameters. The tail (from ixl_shutdown_hmc on) is the error-unwind
 * sequence, releasing resources in reverse order of acquisition.
 */
479 ixl_if_attach_pre(if_ctx_t ctx)
485 enum i40e_get_fw_lldp_status_resp lldp_status;
486 struct i40e_filter_control_settings filter;
487 enum i40e_status_code status;
490 dev = iflib_get_dev(ctx);
491 pf = iflib_get_softc(ctx);
493 INIT_DBG_DEV(dev, "begin");
505 vsi->media = iflib_get_media(ctx);
506 vsi->shared = iflib_get_softc_ctx(ctx);
/* Admin mutex/callout must exist before any path can arm the admin timer */
508 snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
509 "%s:admin", device_get_nameunit(dev));
510 mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
511 callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
513 /* Save tunable values */
514 ixl_save_pf_tunables(pf);
516 /* Do PCI setup - map BAR0, etc */
517 if (ixl_allocate_pci_resources(pf)) {
518 device_printf(dev, "Allocation of PCI resources failed\n");
523 /* Establish a clean starting point */
525 i40e_set_mac_type(hw);
527 error = ixl_pf_reset(pf);
531 /* Initialize the shared code */
532 status = i40e_init_shared_code(hw);
534 device_printf(dev, "Unable to initialize shared code, error %s\n",
535 i40e_stat_str(hw, status));
540 /* Set up the admin queue */
541 hw->aq.num_arq_entries = IXL_AQ_LEN;
542 hw->aq.num_asq_entries = IXL_AQ_LEN;
543 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
544 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
/* API-version mismatch is handled separately below, so tolerate it here */
546 status = i40e_init_adminq(hw);
547 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
548 device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
549 i40e_stat_str(hw, status));
553 ixl_print_nvm_version(pf);
555 if (status == I40E_ERR_FIRMWARE_API_VERSION) {
556 device_printf(dev, "The driver for the device stopped "
557 "because the NVM image is newer than expected.\n");
558 device_printf(dev, "You must install the most recent version of "
559 "the network driver.\n");
/* Non-fatal NVM vs. driver version skew: warn but continue attaching */
564 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
565 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
566 device_printf(dev, "The driver for the device detected "
567 "a newer version of the NVM image than expected.\n");
568 device_printf(dev, "Please install the most recent version "
569 "of the network driver.\n");
570 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
571 device_printf(dev, "The driver for the device detected "
572 "an older version of the NVM image than expected.\n");
573 device_printf(dev, "Please update the NVM image.\n");
576 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
577 error = ixl_attach_pre_recovery_mode(pf);
584 i40e_clear_pxe_mode(hw);
586 /* Get capabilities from the device */
587 error = ixl_get_hw_capabilities(pf);
589 device_printf(dev, "get_hw_capabilities failed: %d\n",
594 /* Set up host memory cache */
595 error = ixl_setup_hmc(pf);
599 /* Disable LLDP from the firmware for certain NVM versions */
600 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
601 (pf->hw.aq.fw_maj_ver < 4)) {
602 i40e_aq_stop_lldp(hw, true, false, NULL);
603 pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
606 /* Try enabling Energy Efficient Ethernet (EEE) mode */
607 if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
608 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
610 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
612 /* Get MAC addresses from hardware */
613 i40e_get_mac_addr(hw, hw->mac.addr);
614 error = i40e_validate_mac_addr(hw->mac.addr);
616 device_printf(dev, "validate_mac_addr failed: %d\n", error);
619 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
620 iflib_set_mac(ctx, hw->mac.addr);
621 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
623 /* Set up the device filtering */
624 bzero(&filter, sizeof(filter));
625 filter.enable_ethtype = TRUE;
626 filter.enable_macvlan = TRUE;
627 filter.enable_fdir = FALSE;
628 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
/* Filter-control failure is logged but not fatal */
629 if (i40e_set_filter_control(hw, &filter))
630 device_printf(dev, "i40e_set_filter_control() failed\n");
632 /* Query device FW LLDP status */
633 if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
634 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
635 atomic_set_32(&pf->state,
636 IXL_PF_STATE_FW_LLDP_DISABLED);
638 atomic_clear_32(&pf->state,
639 IXL_PF_STATE_FW_LLDP_DISABLED);
643 /* Tell FW to apply DCB config on link up */
644 i40e_aq_set_dcb_parameters(hw, true, NULL);
646 /* Fill out iflib parameters */
649 INIT_DBG_DEV(dev, "end");
/* --- error unwind: release in reverse order of acquisition --- */
653 ixl_shutdown_hmc(pf);
655 i40e_shutdown_adminq(hw);
657 ixl_free_pci_resources(pf);
659 mtx_lock(&pf->admin_mtx);
660 callout_stop(&pf->admin_timer);
661 mtx_unlock(&pf->admin_mtx);
662 mtx_destroy(&pf->admin_mtx);
/*
 * Second-stage iflib attach, run after queue/interrupt negotiation:
 * records the final queue counts, creates the ifnet, then (outside of FW
 * recovery mode) configures the switch, filter lists, the PF queue
 * manager, PHY interrupt mask, admin-queue interrupt, sysctls/statistics,
 * link state, SR-IOV and optional iWARP, and finally arms the admin timer.
 * In recovery mode only the admin interrupt, recovery sysctls and the
 * admin timer are set up.
 */
667 ixl_if_attach_post(if_ctx_t ctx)
674 enum i40e_status_code status;
676 dev = iflib_get_dev(ctx);
677 pf = iflib_get_softc(ctx);
679 INIT_DBG_DEV(dev, "begin");
682 vsi->ifp = iflib_get_ifp(ctx);
685 /* Save off determined number of queues for interface */
686 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
687 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
689 /* Setup OS network interface / ifnet */
690 if (ixl_setup_interface(dev, pf)) {
691 device_printf(dev, "interface setup failed!\n");
696 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
697 /* Keep admin queue interrupts active while driver is loaded */
698 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
699 ixl_configure_intr0_msix(pf);
700 ixl_enable_intr0(hw);
703 ixl_add_sysctls_recovery_mode(pf);
705 /* Start the admin timer */
706 mtx_lock(&pf->admin_mtx);
707 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
708 mtx_unlock(&pf->admin_mtx);
712 /* Determine link state */
713 if (ixl_attach_get_link_status(pf)) {
718 error = ixl_switch_config(pf);
720 device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
725 /* Add protocol filters to list */
726 ixl_init_filters(vsi);
728 /* Init queue allocation manager */
729 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
731 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
735 /* reserve a contiguous allocation for the PF's VSI */
736 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
737 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
739 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
743 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
744 pf->qtag.num_allocated, pf->qtag.num_active);
746 /* Limit PHY interrupts to link, autoneg, and modules failure */
747 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
750 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
751 " aq_err %s\n", i40e_stat_str(hw, status),
752 i40e_aq_str(hw, hw->aq.asq_last_status));
756 /* Get the bus configuration and set the shared code */
757 ixl_get_bus_info(pf);
759 /* Keep admin queue interrupts active while driver is loaded */
760 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
761 ixl_configure_intr0_msix(pf);
762 ixl_enable_intr0(hw);
765 /* Set initial advertised speed sysctl value */
766 ixl_set_initial_advertised_speeds(pf);
768 /* Initialize statistics & add sysctls */
769 ixl_add_device_sysctls(pf);
770 ixl_pf_reset_stats(pf);
771 ixl_update_stats_counters(pf);
772 ixl_add_hw_stats(pf);
/* Force a fresh link query so the initial link state is accurate */
774 hw->phy.get_link_info = true;
775 i40e_get_link_status(hw, &pf->link_up);
776 ixl_update_link_status(pf);
779 ixl_initialize_sriov(pf);
/* iWARP is enabled only if the HW supports it, the tunable allows it,
 * and MSI-X vectors were actually set aside for it (pf->iw_msix > 0). */
783 if (hw->func_caps.iwarp && ixl_enable_iwarp) {
784 pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
785 if (pf->iw_enabled) {
786 error = ixl_iw_pf_attach(pf);
789 "interfacing to iWARP driver failed: %d\n",
793 device_printf(dev, "iWARP ready\n");
795 device_printf(dev, "iWARP disabled on this device "
796 "(no MSI-X vectors)\n");
798 pf->iw_enabled = false;
799 device_printf(dev, "The device is not iWARP enabled\n");
802 /* Start the admin timer */
803 mtx_lock(&pf->admin_mtx);
804 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
805 mtx_unlock(&pf->admin_mtx);
807 INIT_DBG_DEV(dev, "end");
811 INIT_DEBUGOUT("end: error %d", error);
812 /* ixl_if_detach() is called on error from this */
/*
 * Tear down everything the attach paths created: stop and destroy the
 * admin timer, detach iWARP (aborting if the iWARP driver is busy),
 * remove media types, shut down HMC and the admin queue, and release the
 * queue manager, PCI resources and MAC filter list.
 */
817 * XXX: iflib always ignores the return value of detach()
818 * -> This means that this isn't allowed to fail
821 ixl_if_detach(if_ctx_t ctx)
823 struct ixl_pf *pf = iflib_get_softc(ctx);
824 struct ixl_vsi *vsi = &pf->vsi;
825 struct i40e_hw *hw = &pf->hw;
826 device_t dev = pf->dev;
827 enum i40e_status_code status;
832 INIT_DBG_DEV(dev, "begin");
834 /* Stop the admin timer */
835 mtx_lock(&pf->admin_mtx);
836 callout_stop(&pf->admin_timer);
837 mtx_unlock(&pf->admin_mtx);
838 mtx_destroy(&pf->admin_mtx);
841 if (ixl_enable_iwarp && pf->iw_enabled) {
842 error = ixl_iw_pf_detach(pf);
843 if (error == EBUSY) {
844 device_printf(dev, "iwarp in use; stop it first.\n");
849 /* Remove all previously allocated media types */
850 ifmedia_removeall(vsi->media);
852 /* Shutdown LAN HMC */
853 ixl_shutdown_hmc(pf);
855 /* Shutdown admin queue */
856 ixl_disable_intr0(hw);
857 status = i40e_shutdown_adminq(hw);
860 "i40e_shutdown_adminq() failed with status %s\n",
861 i40e_stat_str(hw, status));
863 ixl_pf_qmgr_destroy(&pf->qmgr);
864 ixl_free_pci_resources(pf);
865 ixl_free_mac_filters(vsi);
866 INIT_DBG_DEV(dev, "end");
/* ifdi_shutdown: currently only logs; stop/low-power handling is TODO. */
871 ixl_if_shutdown(if_ctx_t ctx)
875 INIT_DEBUGOUT("ixl_if_shutdown: begin");
877 /* TODO: Call ixl_if_stop()? */
879 /* TODO: Then setup low power mode */
/* ifdi_suspend: currently only logs; stop/low-power handling is TODO. */
885 ixl_if_suspend(if_ctx_t ctx)
889 INIT_DEBUGOUT("ixl_if_suspend: begin");
891 /* TODO: Call ixl_if_stop()? */
893 /* TODO: Then setup low power mode */
/*
 * ifdi_resume: re-initialize the interface after a suspend/resume cycle,
 * but only if it was administratively up (IFF_UP).
 */
899 ixl_if_resume(if_ctx_t ctx)
901 struct ifnet *ifp = iflib_get_ifp(ctx);
903 INIT_DEBUGOUT("ixl_if_resume: begin");
905 /* Read & clear wake-up registers */
907 /* Required after D3->D0 transition */
908 if (ifp->if_flags & IFF_UP)
/*
 * ifdi_init: bring the interface up. Skipped entirely in FW recovery
 * mode. Rebuilds HW structures if the admin queue died, refreshes the MAC
 * address (which the user may have overridden with an LAA), initializes
 * the VSI rings/HMC contexts, programs multicast filters, interrupt
 * routing/ITR, enables the rings, restores filters and promiscuous state,
 * and re-initializes iWARP if enabled.
 */
915 ixl_if_init(if_ctx_t ctx)
917 struct ixl_pf *pf = iflib_get_softc(ctx);
918 struct ixl_vsi *vsi = &pf->vsi;
919 struct i40e_hw *hw = &pf->hw;
920 struct ifnet *ifp = iflib_get_ifp(ctx);
921 device_t dev = iflib_get_dev(ctx);
922 u8 tmpaddr[ETHER_ADDR_LEN];
925 if (IXL_PF_IN_RECOVERY_MODE(pf))
928 * If the aq is dead here, it probably means something outside of the driver
929 * did something to the adapter, like a PF reset.
930 * So, rebuild the driver's state here if that occurs.
932 if (!i40e_check_asq_alive(&pf->hw)) {
933 device_printf(dev, "Admin Queue is down; resetting...\n");
934 ixl_teardown_hw_structs(pf);
935 ixl_rebuild_hw_structs_after_reset(pf, false);
938 /* Get the latest mac address... User might use a LAA */
939 bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
/* Only reprogram the MAC if it actually changed and is valid */
940 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
941 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
942 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
943 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
944 ret = i40e_aq_mac_address_write(hw,
945 I40E_AQC_WRITE_TYPE_LAA_ONLY,
948 device_printf(dev, "LLA address change failed!!\n");
951 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
954 iflib_set_mac(ctx, hw->mac.addr);
956 /* Prepare the VSI: rings, hmc contexts, etc... */
957 if (ixl_initialize_vsi(vsi)) {
958 device_printf(dev, "initialize vsi failed!!\n");
962 /* Reconfigure multicast filters in HW */
963 ixl_if_multi_set(ctx);
968 /* Set up MSI-X routing and the ITR settings */
969 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
970 ixl_configure_queue_intr_msix(pf);
971 ixl_configure_itr(pf);
973 ixl_configure_legacy(pf);
/* TX completion tracking state depends on the writeback mode in use */
975 if (vsi->enable_head_writeback)
976 ixl_init_tx_cidx(vsi);
978 ixl_init_tx_rsqs(vsi);
980 ixl_enable_rings(vsi);
982 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
984 /* Re-add configure filters to HW */
985 ixl_reconfigure_filters(vsi);
987 /* Configure promiscuous mode */
988 ixl_if_promisc_set(ctx, if_getflags(ifp));
991 if (ixl_enable_iwarp && pf->iw_enabled) {
992 ret = ixl_iw_pf_init(pf);
995 "initialize iwarp failed, code %d\n", ret);
/*
 * ifdi_stop: quiesce the interface — stop iWARP if running, then disable
 * the ring interrupts and the rings themselves. No-op in FW recovery mode.
 */
1001 ixl_if_stop(if_ctx_t ctx)
1003 struct ixl_pf *pf = iflib_get_softc(ctx);
1004 struct ixl_vsi *vsi = &pf->vsi;
1006 INIT_DEBUGOUT("ixl_if_stop: begin\n");
1008 if (IXL_PF_IN_RECOVERY_MODE(pf))
1011 // TODO: This may need to be reworked
1013 /* Stop iWARP device */
1014 if (ixl_enable_iwarp && pf->iw_enabled)
1018 ixl_disable_rings_intr(vsi);
1019 ixl_disable_rings(pf, vsi, &pf->qtag);
/*
 * ifdi_msix_intr_assign: wire up MSI-X vectors. Vector 0 is reserved for
 * the admin queue; vectors 1..n go to the RX queues; TX queues share the
 * RX vectors via softirqs (TX queue i uses RX vector (i % nrxqsets) + 1).
 * On an RX allocation failure, all previously allocated IRQs are freed
 * (tail of the function).
 */
1023 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
1025 struct ixl_pf *pf = iflib_get_softc(ctx);
1026 struct ixl_vsi *vsi = &pf->vsi;
1027 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1028 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1029 int err, i, rid, vector = 0;
1032 MPASS(vsi->shared->isc_nrxqsets > 0);
1033 MPASS(vsi->shared->isc_ntxqsets > 0);
1035 /* Admin Que must use vector 0*/
1037 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
1038 ixl_msix_adminq, pf, 0, "aq");
1040 iflib_irq_free(ctx, &vsi->irq);
1041 device_printf(iflib_get_dev(ctx),
1042 "Failed to register Admin Que handler");
1045 /* Create soft IRQ for handling VFLRs */
1046 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
1048 /* Now set up the stations */
1049 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
1052 snprintf(buf, sizeof(buf), "rxq%d", i);
1053 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1054 IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
1055 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
1056 * what's expected in the iflib context? */
1058 device_printf(iflib_get_dev(ctx),
1059 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
/* Remember how many queues actually got a vector so cleanup frees them all */
1060 vsi->num_rx_queues = i + 1;
1063 rx_que->msix = vector;
1066 bzero(buf, sizeof(buf));
1068 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
1069 snprintf(buf, sizeof(buf), "txq%d", i);
1070 iflib_softirq_alloc_generic(ctx,
1071 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
1072 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1074 /* TODO: Maybe call a strategy function for this to figure out which
1075 * interrupts to map Tx queues to. I don't know if there's an immediately
1076 * better way than this other than a user-supplied map, though. */
1077 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* --- error path: release the admin IRQ and every RX queue IRQ --- */
1082 iflib_irq_free(ctx, &vsi->irq);
1083 rx_que = vsi->rx_queues;
1084 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1085 iflib_irq_free(ctx, &rx_que->que_irq);
/*
 * ixl_if_enable_intr - iflib callback: enable all interrupts.
 * Called by iflib_init_locked, after ixl_if_init().
 * Enables the "other"/adminq cause (intr0) and every RX queue interrupt.
 */
 * Enable all interrupts
 * iflib_init_locked, after ixl_if_init()
1096 ixl_if_enable_intr(if_ctx_t ctx)
1098 struct ixl_pf *pf = iflib_get_softc(ctx);
1099 struct ixl_vsi *vsi = &pf->vsi;
1100 struct i40e_hw *hw = vsi->hw;
1101 struct ixl_rx_queue *que = vsi->rx_queues;
1103 ixl_enable_intr0(hw);
1104 /* Enable queue interrupts */
1105 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1106 /* TODO: Queue index parameter is probably wrong */
/* Uses the ring's hardware queue index (rxr.me); note the disable path
 * below uses msix - 1 instead — the TODO above flags this mismatch. */
1107 ixl_enable_queue(hw, que->rxr.me);
/*
 * ixl_if_disable_intr - iflib callback: disable queue interrupts.
 * Other interrupt causes (adminq etc.) must remain active, so only the
 * per-queue causes are masked.
 */
 * Disable queue interrupts
 * Other interrupt causes need to remain active.
1116 ixl_if_disable_intr(if_ctx_t ctx)
1118 struct ixl_pf *pf = iflib_get_softc(ctx);
1119 struct ixl_vsi *vsi = &pf->vsi;
1120 struct i40e_hw *hw = vsi->hw;
1121 struct ixl_rx_queue *rx_que = vsi->rx_queues;
/* MSI-X: disable each RX queue cause individually (msix is 1-based,
 * hardware queue-interrupt index is 0-based, hence the -1). */
1123 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1124 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1125 ixl_disable_queue(hw, rx_que->msix - 1);
/* Non-MSI-X fallback: detach the queue list from the interrupt chain. */
1127 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1128 // stops queues from triggering interrupts
1129 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/*
 * ixl_if_rx_queue_intr_enable - iflib callback: re-arm one RX queue's
 * interrupt after processing (msix is 1-based; hw index is msix - 1).
 */
1134 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1136 struct ixl_pf *pf = iflib_get_softc(ctx);
1137 struct ixl_vsi *vsi = &pf->vsi;
1138 struct i40e_hw *hw = vsi->hw;
1139 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
1141 ixl_enable_queue(hw, rx_que->msix - 1);
/*
 * ixl_if_tx_queue_intr_enable - iflib callback: re-arm the interrupt a TX
 * queue shares with its RX vector (tx_que->msix was assigned round-robin
 * in ixl_if_msix_intr_assign).
 */
1146 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1148 struct ixl_pf *pf = iflib_get_softc(ctx);
1149 struct ixl_vsi *vsi = &pf->vsi;
1150 struct i40e_hw *hw = vsi->hw;
1151 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1153 ixl_enable_queue(hw, tx_que->msix - 1);
/*
 * ixl_if_tx_queues_alloc - iflib callback: allocate software TX queue
 * state and bind it to the DMA rings iflib already allocated (vaddrs /
 * paddrs, ntxqs descriptors rings per qset).
 *
 * When head writeback is disabled, a report-status array (tx_rsq) is also
 * allocated per ring to track descriptor completion by index.
 *
 * NOTE(review): error-path lines (returns / gotos) are elided in this
 * excerpt; ixl_if_queues_free() at the bottom is the failure cleanup.
 */
1158 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1160 struct ixl_pf *pf = iflib_get_softc(ctx);
1161 struct ixl_vsi *vsi = &pf->vsi;
1162 if_softc_ctx_t scctx = vsi->shared;
1163 struct ixl_tx_queue *que;
1164 int i, j, error = 0;
1166 MPASS(scctx->isc_ntxqsets > 0);
1168 MPASS(scctx->isc_ntxqsets == ntxqsets);
1170 /* Allocate queue structure memory */
1171 if (!(vsi->tx_queues =
1172 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1173 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1177 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1178 struct tx_ring *txr = &que->txr;
1183 if (!vsi->enable_head_writeback) {
1184 /* Allocate report status array */
1185 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1186 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1190 /* Init report status array */
1191 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1192 txr->tx_rsq[j] = QIDX_INVALID;
1194 /* get the virtual and physical address of the hardware queues */
1195 txr->tail = I40E_QTX_TAIL(txr->me);
1196 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1197 txr->tx_paddr = paddrs[i * ntxqs];
/* Failure cleanup: releases everything allocated above. */
1203 ixl_if_queues_free(ctx);
/*
 * ixl_if_rx_queues_alloc - iflib callback: allocate software RX queue
 * state and bind it to the DMA rings iflib already allocated.
 * Mirrors ixl_if_tx_queues_alloc (no report-status array on RX).
 *
 * NOTE(review): error-path lines (returns / gotos) are elided here;
 * ixl_if_queues_free() at the bottom is the failure cleanup.
 */
1208 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1210 struct ixl_pf *pf = iflib_get_softc(ctx);
1211 struct ixl_vsi *vsi = &pf->vsi;
1212 struct ixl_rx_queue *que;
1216 if_softc_ctx_t scctx = vsi->shared;
1217 MPASS(scctx->isc_nrxqsets > 0);
1219 MPASS(scctx->isc_nrxqsets == nrxqsets);
1222 /* Allocate queue structure memory */
1223 if (!(vsi->rx_queues =
1224 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1225 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1226 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1231 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1232 struct rx_ring *rxr = &que->rxr;
1237 /* get the virtual and physical address of the hardware queues */
1238 rxr->tail = I40E_QRX_TAIL(rxr->me);
1239 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1240 rxr->rx_paddr = paddrs[i * nrxqs];
/* Failure cleanup: releases everything allocated above. */
1246 ixl_if_queues_free(ctx);
/*
 * ixl_if_queues_free - iflib callback: free all software queue state
 * allocated by ixl_if_{tx,rx}_queues_alloc, including the per-TX-ring
 * report-status arrays when head writeback is disabled.  Also tears down
 * the VSI sysctl context unless the PF is in recovery mode.
 */
1251 ixl_if_queues_free(if_ctx_t ctx)
1253 struct ixl_pf *pf = iflib_get_softc(ctx);
1254 struct ixl_vsi *vsi = &pf->vsi;
/* tx_rsq arrays only exist when head writeback is off (see alloc path). */
1256 if (!vsi->enable_head_writeback) {
1257 struct ixl_tx_queue *que;
1260 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1261 struct tx_ring *txr = &que->txr;
1262 if (txr->tx_rsq != NULL) {
1263 free(txr->tx_rsq, M_IXL);
/* NULL after free so a second call here is harmless (elided lines likely
 * reset the pointers — confirm against full source). */
1269 if (vsi->tx_queues != NULL) {
1270 free(vsi->tx_queues, M_IXL);
1271 vsi->tx_queues = NULL;
1273 if (vsi->rx_queues != NULL) {
1274 free(vsi->rx_queues, M_IXL);
1275 vsi->rx_queues = NULL;
1278 if (!IXL_PF_IN_RECOVERY_MODE(pf))
1279 sysctl_ctx_free(&vsi->sysctl_ctx);
/*
 * ixl_update_link_status - push the PF's current link state up to iflib
 * and notify VFs.  Edge-triggered: only acts when link_active changes,
 * so repeated calls with an unchanged link are no-ops.
 *
 * NOTE(review): the link-up/-down condition itself is elided from this
 * excerpt — confirm against the full source.
 */
1283 ixl_update_link_status(struct ixl_pf *pf)
1285 struct ixl_vsi *vsi = &pf->vsi;
1286 struct i40e_hw *hw = &pf->hw;
/* Link came up: report speed and announce to the stack and VFs. */
1290 if (vsi->link_active == FALSE) {
1291 vsi->link_active = TRUE;
1292 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1293 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1294 ixl_link_up_msg(pf);
1296 ixl_broadcast_link_state(pf);
1299 } else { /* Link down */
1300 if (vsi->link_active == TRUE) {
1301 vsi->link_active = FALSE;
1302 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1304 ixl_broadcast_link_state(pf);
/*
 * ixl_handle_lan_overflow_event - log a LAN queue overflow admin queue
 * event: extracts the global RX queue index from param0 and the raw
 * QTX_CTL value from param1, then prints both.  Diagnostic only; no
 * recovery action is taken here.
 */
1311 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1313 device_t dev = pf->dev;
1314 u32 rxq_idx, qtx_ctl;
1316 rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1317 I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1318 qtx_ctl = e->desc.params.external.param1;
1320 device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1321 device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
/*
 * ixl_process_adminq - drain and dispatch pending admin queue (ARQ)
 * events: link status changes, VF-to-PF messages, and LAN overflow
 * events.  Loops until no events remain or IXL_ADM_LIMIT iterations,
 * then re-enables the adminq interrupt cause in PFINT_ICR0_ENA.
 *
 * @pending: out parameter — number of events still queued after the loop.
 *
 * NOTE(review): the switch statement framing and break lines are elided
 * in this excerpt — confirm against the full source.
 */
1325 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1327 enum i40e_status_code status = I40E_SUCCESS;
1328 struct i40e_arq_event_info event;
1329 struct i40e_hw *hw = &pf->hw;
1330 device_t dev = pf->dev;
/* Single reusable event buffer for the whole drain loop. */
1334 event.buf_len = IXL_AQ_BUF_SZ;
1335 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1336 if (!event.msg_buf) {
1337 device_printf(dev, "%s: Unable to allocate memory for Admin"
1338 " Queue event!\n", __func__);
1342 /* clean and process any events */
1344 status = i40e_clean_arq_element(hw, &event, pending);
1347 opcode = LE16_TO_CPU(event.desc.opcode);
1348 ixl_dbg(pf, IXL_DBG_AQ,
1349 "Admin Queue event: %#06x\n", opcode);
/* Dispatch by opcode. */
1351 case i40e_aqc_opc_get_link_status:
1352 ixl_link_event(pf, &event);
1354 case i40e_aqc_opc_send_msg_to_pf:
1356 ixl_handle_vf_msg(pf, &event);
1360 * This should only occur on no-drop queues, which
1361 * aren't currently configured.
1363 case i40e_aqc_opc_event_lan_overflow:
1364 ixl_handle_lan_overflow_event(pf, &event);
1369 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1371 free(event.msg_buf, M_IXL);
1373 /* Re-enable admin queue interrupt cause */
1374 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1375 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1376 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_if_update_admin_status - iflib admin task: handle pending resets
 * and MDD events, drain the admin queue, refresh link state, and either
 * reschedule itself (events still pending) or re-enable intr0.
 */
1382 ixl_if_update_admin_status(if_ctx_t ctx)
1384 struct ixl_pf *pf = iflib_get_softc(ctx);
1385 struct i40e_hw *hw = &pf->hw;
/* Finish an EMP reset first, if one is in flight. */
1388 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1389 ixl_handle_empr_reset(pf);
1392 * Admin Queue is shut down while handling reset.
1393 * Don't proceed if it hasn't been re-initialized
1394 * e.g due to an issue with new FW.
1396 if (!i40e_check_asq_alive(&pf->hw))
1399 if (pf->state & IXL_PF_STATE_MDD_PENDING)
1400 ixl_handle_mdd_event(pf);
1402 ixl_process_adminq(pf, &pending);
1403 ixl_update_link_status(pf);
1406 * If there are still messages to process, reschedule ourselves.
1407 * Otherwise, re-enable our interrupt and go to sleep.
1410 iflib_admin_intr_deferred(ctx);
1412 ixl_enable_intr0(hw);
/*
 * ixl_if_multi_set - iflib callback: sync the hardware multicast filter
 * list with the interface's link-level multicast addresses.  Deletes
 * stale filters, then either switches the VSI to multicast-promiscuous
 * (when the address count hits MAX_MULTICAST_ADDR) or re-installs one
 * HW filter per address.
 */
1416 ixl_if_multi_set(if_ctx_t ctx)
1418 struct ixl_pf *pf = iflib_get_softc(ctx);
1419 struct ixl_vsi *vsi = &pf->vsi;
1420 struct i40e_hw *hw = vsi->hw;
1424 IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1426 mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1427 /* Delete filters for removed multicast addresses */
1428 del_mcnt = ixl_del_multi(vsi);
1429 vsi->num_macs -= del_mcnt;
/* Too many addresses to filter individually: go promiscuous instead. */
1431 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1432 i40e_aq_set_vsi_multicast_promiscuous(hw,
1433 vsi->seid, TRUE, NULL);
1436 /* (re-)install filters for all mcast addresses */
1437 /* XXX: This bypasses filter count tracking code! */
1438 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
1440 vsi->num_macs += mcnt;
1441 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1442 ixl_add_hw_filters(vsi, flags, mcnt);
1445 ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
1446 __func__, vsi->num_macs);
1447 IOCTL_DEBUGOUT("ixl_if_multi_set: end");
/*
 * ixl_if_mtu_set - iflib callback: validate and apply a new MTU.
 * Rejects MTUs whose resulting frame (MTU + L2 header + CRC + VLAN tag)
 * would exceed IXL_MAX_FRAME; otherwise stores the new max frame size
 * in the shared iflib softc context.
 */
1451 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1453 struct ixl_pf *pf = iflib_get_softc(ctx);
1454 struct ixl_vsi *vsi = &pf->vsi;
1456 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1457 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1458 ETHER_VLAN_ENCAP_LEN)
1461 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1462 ETHER_VLAN_ENCAP_LEN;
/*
 * ixl_if_media_status - iflib callback: report current media state.
 * Marks the media valid, and when link is up (condition elided in this
 * excerpt) sets ACTIVE/FDX and translates the firmware-reported PHY type
 * into the matching ifmedia subtype.  Flow control pause flags from link
 * autonegotiation info are appended at the end.
 *
 * NOTE(review): the switch framing and break statements are elided here —
 * confirm against the full source.
 */
1468 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1470 struct ixl_pf *pf = iflib_get_softc(ctx);
1471 struct i40e_hw *hw = &pf->hw;
1473 INIT_DEBUGOUT("ixl_media_status: begin");
1475 ifmr->ifm_status = IFM_AVALID;
1476 ifmr->ifm_active = IFM_ETHER;
1482 ifmr->ifm_status |= IFM_ACTIVE;
1483 /* Hardware is always full-duplex */
1484 ifmr->ifm_active |= IFM_FDX;
/* Map i40e PHY type -> ifmedia subtype. */
1486 switch (hw->phy.link_info.phy_type) {
/* 100M */
1488 case I40E_PHY_TYPE_100BASE_TX:
1489 ifmr->ifm_active |= IFM_100_TX;
/* 1G */
1492 case I40E_PHY_TYPE_1000BASE_T:
1493 ifmr->ifm_active |= IFM_1000_T;
1495 case I40E_PHY_TYPE_1000BASE_SX:
1496 ifmr->ifm_active |= IFM_1000_SX;
1498 case I40E_PHY_TYPE_1000BASE_LX:
1499 ifmr->ifm_active |= IFM_1000_LX;
1501 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1502 ifmr->ifm_active |= IFM_1000_T;
/* 2.5G / 5G */
1505 case I40E_PHY_TYPE_2_5GBASE_T:
1506 ifmr->ifm_active |= IFM_2500_T;
1509 case I40E_PHY_TYPE_5GBASE_T:
1510 ifmr->ifm_active |= IFM_5000_T;
/* 10G */
1513 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1514 ifmr->ifm_active |= IFM_10G_TWINAX;
1516 case I40E_PHY_TYPE_10GBASE_SR:
1517 ifmr->ifm_active |= IFM_10G_SR;
1519 case I40E_PHY_TYPE_10GBASE_LR:
1520 ifmr->ifm_active |= IFM_10G_LR;
1522 case I40E_PHY_TYPE_10GBASE_T:
1523 ifmr->ifm_active |= IFM_10G_T;
1525 case I40E_PHY_TYPE_XAUI:
1526 case I40E_PHY_TYPE_XFI:
1527 ifmr->ifm_active |= IFM_10G_TWINAX;
1529 case I40E_PHY_TYPE_10GBASE_AOC:
1530 ifmr->ifm_active |= IFM_10G_AOC;
/* 25G */
1533 case I40E_PHY_TYPE_25GBASE_KR:
1534 ifmr->ifm_active |= IFM_25G_KR;
1536 case I40E_PHY_TYPE_25GBASE_CR:
1537 ifmr->ifm_active |= IFM_25G_CR;
1539 case I40E_PHY_TYPE_25GBASE_SR:
1540 ifmr->ifm_active |= IFM_25G_SR;
1542 case I40E_PHY_TYPE_25GBASE_LR:
1543 ifmr->ifm_active |= IFM_25G_LR;
1545 case I40E_PHY_TYPE_25GBASE_AOC:
1546 ifmr->ifm_active |= IFM_25G_AOC;
1548 case I40E_PHY_TYPE_25GBASE_ACC:
1549 ifmr->ifm_active |= IFM_25G_ACC;
/* 40G */
1552 case I40E_PHY_TYPE_40GBASE_CR4:
1553 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1554 ifmr->ifm_active |= IFM_40G_CR4;
1556 case I40E_PHY_TYPE_40GBASE_SR4:
1557 ifmr->ifm_active |= IFM_40G_SR4;
1559 case I40E_PHY_TYPE_40GBASE_LR4:
1560 ifmr->ifm_active |= IFM_40G_LR4;
1562 case I40E_PHY_TYPE_XLAUI:
1563 ifmr->ifm_active |= IFM_OTHER;
1565 case I40E_PHY_TYPE_1000BASE_KX:
1566 ifmr->ifm_active |= IFM_1000_KX;
1568 case I40E_PHY_TYPE_SGMII:
1569 ifmr->ifm_active |= IFM_1000_SGMII;
1571 /* ERJ: What's the difference between these? */
1572 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1573 case I40E_PHY_TYPE_10GBASE_CR1:
1574 ifmr->ifm_active |= IFM_10G_CR1;
1576 case I40E_PHY_TYPE_10GBASE_KX4:
1577 ifmr->ifm_active |= IFM_10G_KX4;
1579 case I40E_PHY_TYPE_10GBASE_KR:
1580 ifmr->ifm_active |= IFM_10G_KR;
1582 case I40E_PHY_TYPE_SFI:
1583 ifmr->ifm_active |= IFM_10G_SFI;
1585 /* Our single 20G media type */
1586 case I40E_PHY_TYPE_20GBASE_KR2:
1587 ifmr->ifm_active |= IFM_20G_KR2;
1589 case I40E_PHY_TYPE_40GBASE_KR4:
1590 ifmr->ifm_active |= IFM_40G_KR4;
1592 case I40E_PHY_TYPE_XLPPI:
1593 case I40E_PHY_TYPE_40GBASE_AOC:
1594 ifmr->ifm_active |= IFM_40G_XLPPI;
1596 /* Unknown to driver */
1598 ifmr->ifm_active |= IFM_UNKNOWN;
1601 /* Report flow control status as well */
1602 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1603 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1604 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1605 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * ixl_if_media_change - iflib callback for media selection.  The driver
 * does not support changing media from the host; only validates that the
 * request is for Ethernet, then logs that the operation is unsupported.
 */
1609 ixl_if_media_change(if_ctx_t ctx)
1611 struct ifmedia *ifm = iflib_get_media(ctx);
1613 INIT_DEBUGOUT("ixl_media_change: begin");
1615 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1618 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * ixl_if_promisc_set - iflib callback: program unicast/multicast
 * promiscuous mode on the VSI.  IFF_PROMISC enables both; IFF_ALLMULTI
 * (or an overflowing multicast list) enables multicast-promiscuous only.
 */
1623 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1625 struct ixl_pf *pf = iflib_get_softc(ctx);
1626 struct ixl_vsi *vsi = &pf->vsi;
1627 struct ifnet *ifp = iflib_get_ifp(ctx);
1628 struct i40e_hw *hw = vsi->hw;
1630 bool uni = FALSE, multi = FALSE;
/* Flag decoding (assignments to uni/multi elided in this excerpt). */
1632 if (flags & IFF_PROMISC)
1634 else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1638 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1639 vsi->seid, uni, NULL, true);
1642 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1643 vsi->seid, multi, NULL);
/*
 * ixl_if_timer - iflib periodic timer callback: refresh the PF's
 * statistics counters.  @qid is unused by the visible code.
 */
1648 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1650 struct ixl_pf *pf = iflib_get_softc(ctx);
1655 ixl_update_stats_counters(pf);
/*
 * ixl_if_vlan_register - iflib callback: a VLAN was added to the
 * interface.  Validates the tag (1..4095) and installs a MAC+VLAN
 * filter for the device's own MAC address on that VLAN.
 */
1659 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1661 struct ixl_pf *pf = iflib_get_softc(ctx);
1662 struct ixl_vsi *vsi = &pf->vsi;
1663 struct i40e_hw *hw = vsi->hw;
1665 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1669 ixl_add_filter(vsi, hw->mac.addr, vtag);
/*
 * ixl_if_vlan_unregister - iflib callback: a VLAN was removed.
 * Validates the tag (1..4095) and removes the corresponding MAC+VLAN
 * filter installed by ixl_if_vlan_register.
 */
1673 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1675 struct ixl_pf *pf = iflib_get_softc(ctx);
1676 struct ixl_vsi *vsi = &pf->vsi;
1677 struct i40e_hw *hw = vsi->hw;
1679 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1683 ixl_del_filter(vsi, hw->mac.addr, vtag);
/*
 * ixl_if_get_counter - iflib callback: return a single interface
 * statistic from the VSI's cached counters; anything not tracked by the
 * VSI falls through to the ifnet defaults.
 */
1687 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1689 struct ixl_pf *pf = iflib_get_softc(ctx);
1690 struct ixl_vsi *vsi = &pf->vsi;
1691 if_t ifp = iflib_get_ifp(ctx);
1694 case IFCOUNTER_IPACKETS:
1695 return (vsi->ipackets);
1696 case IFCOUNTER_IERRORS:
1697 return (vsi->ierrors);
1698 case IFCOUNTER_OPACKETS:
1699 return (vsi->opackets);
1700 case IFCOUNTER_OERRORS:
1701 return (vsi->oerrors);
1702 case IFCOUNTER_COLLISIONS:
1703 /* Collisions are by standard impossible in 40G/10G Ethernet */
1705 case IFCOUNTER_IBYTES:
1706 return (vsi->ibytes);
1707 case IFCOUNTER_OBYTES:
1708 return (vsi->obytes);
1709 case IFCOUNTER_IMCASTS:
1710 return (vsi->imcasts);
1711 case IFCOUNTER_OMCASTS:
1712 return (vsi->omcasts);
1713 case IFCOUNTER_IQDROPS:
1714 return (vsi->iqdrops);
1715 case IFCOUNTER_OQDROPS:
1716 return (vsi->oqdrops);
1717 case IFCOUNTER_NOPROTO:
1718 return (vsi->noproto);
/* default: counters the VSI doesn't track. */
1720 return (if_get_counter_default(ifp, cnt));
/*
 * ixl_if_vflr_handle - iflib IOV softirq handler: process pending
 * VF Level Reset (VFLR) events for this PF.
 */
1726 ixl_if_vflr_handle(if_ctx_t ctx)
1728 struct ixl_pf *pf = iflib_get_softc(ctx);
1730 ixl_handle_vflr(pf);
/*
 * ixl_if_i2c_req - iflib callback: service an SFP/I2C read request by
 * reading req->len bytes one at a time via the PF's configured
 * read_i2c_byte method.  Bails out if no I2C access method is set or a
 * byte read fails (error returns elided in this excerpt).
 */
1735 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1737 struct ixl_pf *pf = iflib_get_softc(ctx);
1739 if (pf->read_i2c_byte == NULL)
1742 for (int i = 0; i < req->len; i++)
1743 if (pf->read_i2c_byte(pf, req->offset + i,
1744 req->dev_addr, &req->data[i]))
/*
 * ixl_if_priv_ioctl - iflib callback for driver-private ioctls
 * (SIOCxDRVSPEC / SIOCGPRIVATE_0).  Currently supports only the
 * I40E_NVM_ACCESS command, gated behind a PRIV_DRIVER privilege check
 * because iflib forwards these ioctls without any privilege checks of
 * its own.
 */
1750 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1752 struct ixl_pf *pf = iflib_get_softc(ctx);
1753 struct ifdrv *ifd = (struct ifdrv *)data;
1757 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1758 * performing privilege checks. It is important that this function
1759 * perform the necessary checks for commands which should only be
1760 * executed by privileged threads.
1766 /* NVM update command */
1767 if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1768 error = priv_check(curthread, PRIV_DRIVER);
1771 error = ixl_handle_nvmupd_cmd(pf, ifd);
/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
1792 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
/* VLAN config changes do not require a full reinit on this hardware. */
1795 case IFLIB_RESTART_VLAN_CONFIG:
/*
 * ixl_mc_filter_apply - if_foreach_llmaddr() callback: add one
 * link-level multicast address to the VSI's software filter list
 * (hardware programming happens later via ixl_add_hw_filters).
 */
1802 ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
1804 struct ixl_vsi *vsi = arg;
1806 ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
1811 * Sanity check and save off tunable values.
1814 ixl_save_pf_tunables(struct ixl_pf *pf)
1816 device_t dev = pf->dev;
1818 /* Save tunable information */
1820 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1823 pf->recovery_mode = ixl_debug_recovery_mode;
1825 pf->dbg_mask = ixl_core_debug_mask;
1826 pf->hw.debug_mask = ixl_shared_debug_mask;
1827 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1828 pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1830 pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1831 pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1834 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1835 pf->i2c_access_method = 0;
1837 pf->i2c_access_method = ixl_i2c_access_method;
1839 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1840 device_printf(dev, "Invalid tx_itr value of %d set!\n",
1842 device_printf(dev, "tx_itr must be between %d and %d, "
1845 device_printf(dev, "Using default value of %d instead\n",
1847 pf->tx_itr = IXL_ITR_4K;
1849 pf->tx_itr = ixl_tx_itr;
1851 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1852 device_printf(dev, "Invalid rx_itr value of %d set!\n",
1854 device_printf(dev, "rx_itr must be between %d and %d, "
1857 device_printf(dev, "Using default value of %d instead\n",
1859 pf->rx_itr = IXL_ITR_8K;
1861 pf->rx_itr = ixl_rx_itr;