1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_iw_int.h"
43 #include "ixl_pf_iov.h"
46 /*********************************************************************
48 *********************************************************************/
/* Driver version, assembled as "MAJOR.MINOR.BUILD-k" via __XSTRING stringification. */
49 #define IXL_DRIVER_VERSION_MAJOR 2
50 #define IXL_DRIVER_VERSION_MINOR 3
51 #define IXL_DRIVER_VERSION_BUILD 3
53 #define IXL_DRIVER_VERSION_STRING \
54 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \
55 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \
56 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
58 /*********************************************************************
61 * Used by probe to select devices to load on
63 * ( Vendor ID, Device ID, Branding String )
64 *********************************************************************/
/*
 * PCI IDs this driver claims, consumed by the iflib probe path
 * (see IFLIB_PNP_INFO below).  Each PVIDV() entry is
 * (vendor ID, device ID, branding string).
 */
66 static const pci_vendor_info_t ixl_vendor_info_array[] =
68 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
85 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
86 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
87 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
88 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
89 /* required last entry */
93 /*********************************************************************
95 *********************************************************************/
96 /*** IFLIB interface ***/
97 static void *ixl_register(device_t dev);
98 static int ixl_if_attach_pre(if_ctx_t ctx);
99 static int ixl_if_attach_post(if_ctx_t ctx);
100 static int ixl_if_detach(if_ctx_t ctx);
101 static int ixl_if_shutdown(if_ctx_t ctx);
102 static int ixl_if_suspend(if_ctx_t ctx);
103 static int ixl_if_resume(if_ctx_t ctx);
104 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
105 static void ixl_if_enable_intr(if_ctx_t ctx);
106 static void ixl_if_disable_intr(if_ctx_t ctx);
107 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
108 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
109 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
110 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
111 static void ixl_if_queues_free(if_ctx_t ctx);
112 static void ixl_if_update_admin_status(if_ctx_t ctx);
113 static void ixl_if_multi_set(if_ctx_t ctx);
114 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
115 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
116 static int ixl_if_media_change(if_ctx_t ctx);
117 static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
118 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
119 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
120 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
121 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
122 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
123 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
124 static bool ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
126 static void ixl_if_vflr_handle(if_ctx_t ctx);
130 static void ixl_save_pf_tunables(struct ixl_pf *);
131 static int ixl_allocate_pci_resources(struct ixl_pf *);
132 static void ixl_setup_ssctx(struct ixl_pf *pf);
133 static void ixl_admin_timer(void *arg);
135 /*********************************************************************
136 * FreeBSD Device Interface Entry Points
137 *********************************************************************/
/*
 * Newbus device methods.  Probe/attach/detach/shutdown and the PCI
 * SR-IOV entry points are all delegated to iflib; only
 * device_register is implemented locally (ixl_register).
 */
139 static device_method_t ixl_methods[] = {
140 /* Device interface */
141 DEVMETHOD(device_register, ixl_register),
142 DEVMETHOD(device_probe, iflib_device_probe),
143 DEVMETHOD(device_attach, iflib_device_attach),
144 DEVMETHOD(device_detach, iflib_device_detach),
145 DEVMETHOD(device_shutdown, iflib_device_shutdown),
147 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
148 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
149 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/* Driver registration: softc is the whole struct ixl_pf. */
154 static driver_t ixl_driver = {
155 "ixl", ixl_methods, sizeof(struct ixl_pf),
158 DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
159 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
160 MODULE_VERSION(ixl, 3);
/* Module dependencies: pci bus, ether framework, and iflib itself. */
162 MODULE_DEPEND(ixl, pci, 1, 1, 1);
163 MODULE_DEPEND(ixl, ether, 1, 1, 1);
164 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
/*
 * iflib device-interface (ifdi) method table: maps each iflib
 * callback onto the corresponding ixl_if_* handler declared above.
 */
166 static device_method_t ixl_if_methods[] = {
167 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
168 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
169 DEVMETHOD(ifdi_detach, ixl_if_detach),
170 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
171 DEVMETHOD(ifdi_suspend, ixl_if_suspend),
172 DEVMETHOD(ifdi_resume, ixl_if_resume),
173 DEVMETHOD(ifdi_init, ixl_if_init),
174 DEVMETHOD(ifdi_stop, ixl_if_stop),
175 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
176 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
177 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
178 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
179 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
180 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
181 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
182 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
183 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
184 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
185 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
186 DEVMETHOD(ifdi_media_status, ixl_if_media_status),
187 DEVMETHOD(ifdi_media_change, ixl_if_media_change),
188 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
189 DEVMETHOD(ifdi_timer, ixl_if_timer),
190 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
191 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
192 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
193 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
194 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
195 DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
/* SR-IOV (VF management) callbacks — presumably compiled conditionally; confirm ifdef context. */
197 DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
198 DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
199 DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
200 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
/* iflib-facing driver object referenced from ixl_sctx_init below. */
207 static driver_t ixl_if_driver = {
208 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
212 ** TUNEABLE PARAMETERS:
/* Root of the hw.ixl sysctl/tunable tree; all knobs below hang off it. */
215 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
216 "ixl driver parameters");
220 * Leave this on unless you need to send flow control
221 * frames (or other control frames) from software
223 static int ixl_enable_tx_fc_filter = 1;
224 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
225 &ixl_enable_tx_fc_filter);
226 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
227 &ixl_enable_tx_fc_filter, 0,
228 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
/* Debug aid: pretend the firmware entered recovery mode. */
232 static int ixl_debug_recovery_mode = 0;
233 TUNABLE_INT("hw.ixl.debug_recovery_mode",
234 &ixl_debug_recovery_mode);
235 SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
236 &ixl_debug_recovery_mode, 0,
237 "Act like when FW entered recovery mode (for debugging)");
/* Selects how SFP module I2C reads are performed; see IXL_SYSCTL_HELP_I2C_METHOD. */
240 static int ixl_i2c_access_method = 0;
241 TUNABLE_INT("hw.ixl.i2c_access_method",
242 &ixl_i2c_access_method);
243 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
244 &ixl_i2c_access_method, 0,
245 IXL_SYSCTL_HELP_I2C_METHOD);
/* VF loopback toggle for SR-IOV; help text in IXL_SYSCTL_HELP_VF_LOOPBACK. */
247 static int ixl_enable_vf_loopback = 1;
248 TUNABLE_INT("hw.ixl.enable_vf_loopback",
249 &ixl_enable_vf_loopback);
250 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
251 &ixl_enable_vf_loopback, 0,
252 IXL_SYSCTL_HELP_VF_LOOPBACK);
255 * Different method for processing TX descriptor
258 static int ixl_enable_head_writeback = 1;
259 TUNABLE_INT("hw.ixl.enable_head_writeback",
260 &ixl_enable_head_writeback);
261 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
262 &ixl_enable_head_writeback, 0,
263 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
/* Debug print masks: one for driver-local code, one for Intel shared code. */
265 static int ixl_core_debug_mask = 0;
266 TUNABLE_INT("hw.ixl.core_debug_mask",
267 &ixl_core_debug_mask);
268 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
269 &ixl_core_debug_mask, 0,
270 "Display debug statements that are printed in non-shared code");
272 static int ixl_shared_debug_mask = 0;
273 TUNABLE_INT("hw.ixl.shared_debug_mask",
274 &ixl_shared_debug_mask);
275 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
276 &ixl_shared_debug_mask, 0,
277 "Display debug statements that are printed in shared code");
281 ** Controls for Interrupt Throttling
282 ** - true/false for dynamic adjustment
283 ** - default values for static ITR
285 static int ixl_dynamic_rx_itr = 0;
286 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
287 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
288 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
290 static int ixl_dynamic_tx_itr = 0;
291 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
292 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
293 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults: 8K interrupts/s on RX, 4K on TX. */
296 static int ixl_rx_itr = IXL_ITR_8K;
297 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
298 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
299 &ixl_rx_itr, 0, "RX Interrupt Rate");
301 static int ixl_tx_itr = IXL_ITR_4K;
302 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
303 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
304 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* -1 means "leave flow control as configured"; no TUNABLE_INT needed with RDTUN. */
306 static int ixl_flow_control = -1;
307 SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
308 &ixl_flow_control, 0, "Initial Flow Control setting");
/* iWARP knobs — non-static: referenced from other files (e.g. ixl_iw). */
311 int ixl_enable_iwarp = 0;
312 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
313 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
314 &ixl_enable_iwarp, 0, "iWARP enabled");
316 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
317 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
318 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
319 &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
/* TX/RX method tables: hwb = head-writeback path, dwb = descriptor
 * write-back path; selected in ixl_setup_ssctx() based on the
 * enable_head_writeback tunable. */
322 extern struct if_txrx ixl_txrx_hwb;
323 extern struct if_txrx ixl_txrx_dwb;
/*
 * Shared-context template handed to iflib from ixl_register().
 * Describes DMA alignment/size limits, TSO limits, descriptor ring
 * bounds, and the driver object iflib should use.
 */
325 static struct if_shared_ctx ixl_sctx_init = {
326 .isc_magic = IFLIB_MAGIC,
327 .isc_q_align = PAGE_SIZE,
328 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
329 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
330 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
331 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
332 .isc_rx_maxsize = 16384,
333 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
334 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
/* One admin (other-cause) interrupt in addition to the queue vectors. */
339 .isc_admin_intrcnt = 1,
340 .isc_vendor_info = ixl_vendor_info_array,
341 .isc_driver_version = IXL_DRIVER_VERSION_STRING,
342 .isc_driver = &ixl_if_driver,
343 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
345 .isc_nrxd_min = {IXL_MIN_RING},
346 .isc_ntxd_min = {IXL_MIN_RING},
347 .isc_nrxd_max = {IXL_MAX_RING},
348 .isc_ntxd_max = {IXL_MAX_RING},
349 .isc_nrxd_default = {IXL_DEFAULT_RING},
350 .isc_ntxd_default = {IXL_DEFAULT_RING},
/* device_register method: hand iflib our shared-context template. */
355 ixl_register(device_t dev)
357 return (&ixl_sctx_init);
/*
 * Map the device's PCI memory BAR and cache PCI identity/bus info in
 * the shared-code hw structure.  Returns non-zero on failure
 * (error paths not fully visible in this view).
 */
361 ixl_allocate_pci_resources(struct ixl_pf *pf)
363 device_t dev = iflib_get_dev(pf->vsi.ctx);
364 struct i40e_hw *hw = &pf->hw;
/* Map BAR0 register space. */
369 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
372 if (!(pf->pci_mem)) {
373 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
377 /* Save off the PCI information */
378 hw->vendor_id = pci_get_vendor(dev);
379 hw->device_id = pci_get_device(dev);
380 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
381 hw->subsystem_vendor_id =
382 pci_read_config(dev, PCIR_SUBVEND_0, 2);
383 hw->subsystem_device_id =
384 pci_read_config(dev, PCIR_SUBDEV_0, 2);
386 hw->bus.device = pci_get_slot(dev);
387 hw->bus.func = pci_get_function(dev);
389 /* Save off register access information */
390 pf->osdep.mem_bus_space_tag =
391 rman_get_bustag(pf->pci_mem);
392 pf->osdep.mem_bus_space_handle =
393 rman_get_bushandle(pf->pci_mem);
394 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
395 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* Shared code reaches registers through osdep: hw_addr aliases the bus handle. */
398 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
399 pf->hw.back = &pf->osdep;
/*
 * Fill in the iflib softc-context: queue-set limits (1 in FW recovery
 * mode, 128 on X722, 64 otherwise), ring sizes, the TX/RX method
 * table, and TSO/checksum capabilities.
 */
405 ixl_setup_ssctx(struct ixl_pf *pf)
407 if_softc_ctx_t scctx = pf->vsi.shared;
408 struct i40e_hw *hw = &pf->hw;
410 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
411 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
412 scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
413 } else if (hw->mac.type == I40E_MAC_X722)
414 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
416 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
/* Head writeback needs one extra u32 per ring for the HW-written head index. */
418 if (pf->vsi.enable_head_writeback) {
419 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
420 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
421 scctx->isc_txrx = &ixl_txrx_hwb;
423 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
424 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
425 scctx->isc_txrx = &ixl_txrx_dwb;
/* Legacy (non-MSI-X) interrupts funnel through ixl_intr. */
428 scctx->isc_txrx->ift_legacy_intr = ixl_intr;
429 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
430 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
431 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
432 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
433 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
434 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
435 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
436 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
437 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
438 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
/*
 * Periodic callout (every hz/2, armed in attach and rescheduled
 * below).  While LINK_POLLING is set, retries i40e_get_link_status()
 * until it succeeds or IXL_PF_MAX_LINK_POLL elapses, then kicks the
 * iflib admin task.
 */
442 ixl_admin_timer(void *arg)
444 struct ixl_pf *pf = (struct ixl_pf *)arg;
446 if (ixl_test_state(&pf->state, IXL_STATE_LINK_POLLING)) {
447 struct i40e_hw *hw = &pf->hw;
449 enum i40e_status_code status;
451 hw->phy.get_link_info = TRUE;
452 status = i40e_get_link_status(hw, &pf->link_up);
453 if (status == I40E_SUCCESS) {
454 ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
455 /* OS link info is updated in the admin task */
457 device_printf(pf->dev,
458 "%s: i40e_get_link_status status %s, aq error %s\n",
459 __func__, i40e_stat_str(hw, status),
460 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Give up polling after IXL_PF_MAX_LINK_POLL of elapsed sbintime. */
461 stime = getsbinuptime();
462 if (stime - pf->link_poll_start > IXL_PF_MAX_LINK_POLL) {
463 device_printf(pf->dev, "Polling link status failed\n");
464 ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
469 /* Fire off the admin task */
470 iflib_admin_intr_deferred(pf->vsi.ctx);
472 /* Reschedule the admin timer */
473 callout_schedule(&pf->admin_timer, hz/2);
/*
 * Reduced attach path used when the firmware is in recovery mode:
 * fetch the MAC address and, under MSI-X, keep only the admin-queue
 * (vector 0) interrupt enabled.
 */
477 ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
479 struct ixl_vsi *vsi = &pf->vsi;
480 struct i40e_hw *hw = &pf->hw;
481 device_t dev = pf->dev;
483 device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
485 i40e_get_mac_addr(hw, hw->mac.addr);
487 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
488 ixl_configure_intr0_msix(pf);
489 ixl_enable_intr0(hw);
/*
 * First-stage iflib attach.  Maps PCI resources, resets the PF,
 * initializes the shared code and admin queue, validates the NVM/FW
 * API version, then (unless in FW recovery mode) reads capabilities,
 * sets up the HMC, disables FW LLDP on old NVMs, enables EEE, reads
 * MAC addresses and configures HW filter control.  Error paths unwind
 * through the labels at the bottom (HMC -> adminq -> PCI -> mutex).
 */
498 ixl_if_attach_pre(if_ctx_t ctx)
504 enum i40e_get_fw_lldp_status_resp lldp_status;
505 struct i40e_filter_control_settings filter;
506 enum i40e_status_code status;
509 dev = iflib_get_dev(ctx);
510 pf = iflib_get_softc(ctx);
512 INIT_DBG_DEV(dev, "begin");
524 vsi->media = iflib_get_media(ctx);
525 vsi->shared = iflib_get_softc_ctx(ctx);
/* Admin mutex + callout protect/drive the periodic admin timer. */
527 snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
528 "%s:admin", device_get_nameunit(dev));
529 mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
530 callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
532 /* Save tunable values */
533 ixl_save_pf_tunables(pf);
535 /* Do PCI setup - map BAR0, etc */
536 if (ixl_allocate_pci_resources(pf)) {
537 device_printf(dev, "Allocation of PCI resources failed\n");
542 /* Establish a clean starting point */
544 i40e_set_mac_type(hw);
546 error = ixl_pf_reset(pf);
550 /* Initialize the shared code */
551 status = i40e_init_shared_code(hw);
553 device_printf(dev, "Unable to initialize shared code, error %s\n",
554 i40e_stat_str(hw, status));
559 /* Set up the admin queue */
560 hw->aq.num_arq_entries = IXL_AQ_LEN;
561 hw->aq.num_asq_entries = IXL_AQ_LEN;
562 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
563 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
/* FW-API-version mismatch is tolerated here so we can print a better message below. */
565 status = i40e_init_adminq(hw);
566 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
567 device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
568 i40e_stat_str(hw, status));
572 ixl_print_nvm_version(pf);
574 if (status == I40E_ERR_FIRMWARE_API_VERSION) {
575 device_printf(dev, "The driver for the device stopped "
576 "because the NVM image is newer than expected.\n");
577 device_printf(dev, "You must install the most recent version of "
578 "the network driver.\n");
/* Warn (but continue) on minor-version skew in either direction. */
583 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
584 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
585 device_printf(dev, "The driver for the device detected "
586 "a newer version of the NVM image than expected.\n");
587 device_printf(dev, "Please install the most recent version "
588 "of the network driver.\n");
589 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
590 device_printf(dev, "The driver for the device detected "
591 "an older version of the NVM image than expected.\n");
592 device_printf(dev, "Please update the NVM image.\n");
/* FW recovery mode: take the reduced attach path and skip normal bring-up. */
595 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
596 error = ixl_attach_pre_recovery_mode(pf);
603 i40e_clear_pxe_mode(hw);
605 /* Get capabilities from the device */
606 error = ixl_get_hw_capabilities(pf);
608 device_printf(dev, "get_hw_capabilities failed: %d\n",
613 /* Set up host memory cache */
614 error = ixl_setup_hmc(pf);
618 /* Disable LLDP from the firmware for certain NVM versions */
619 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
620 (pf->hw.aq.fw_maj_ver < 4)) {
621 i40e_aq_stop_lldp(hw, true, false, NULL);
622 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
625 /* Try enabling Energy Efficient Ethernet (EEE) mode */
626 if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
627 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
629 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
631 /* Get MAC addresses from hardware */
632 i40e_get_mac_addr(hw, hw->mac.addr);
633 error = i40e_validate_mac_addr(hw->mac.addr);
635 device_printf(dev, "validate_mac_addr failed: %d\n", error);
638 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
639 iflib_set_mac(ctx, hw->mac.addr);
640 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
642 /* Set up the device filtering */
643 bzero(&filter, sizeof(filter));
644 filter.enable_ethtype = TRUE;
645 filter.enable_macvlan = TRUE;
646 filter.enable_fdir = FALSE;
647 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
648 if (i40e_set_filter_control(hw, &filter))
649 device_printf(dev, "i40e_set_filter_control() failed\n");
651 /* Query device FW LLDP status */
652 if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
653 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
654 ixl_set_state(&pf->state,
655 IXL_STATE_FW_LLDP_DISABLED);
657 ixl_clear_state(&pf->state,
658 IXL_STATE_FW_LLDP_DISABLED);
662 /* Tell FW to apply DCB config on link up */
663 i40e_aq_set_dcb_parameters(hw, true, NULL);
665 /* Fill out iflib parameters */
668 INIT_DBG_DEV(dev, "end");
/* Error unwind: HMC, admin queue, PCI resources, then the admin mutex/callout. */
672 ixl_shutdown_hmc(pf);
674 i40e_shutdown_adminq(hw);
676 ixl_free_pci_resources(pf);
678 mtx_lock(&pf->admin_mtx);
679 callout_stop(&pf->admin_timer);
680 mtx_unlock(&pf->admin_mtx);
681 mtx_destroy(&pf->admin_mtx);
/*
 * Second-stage iflib attach, after queue/interrupt negotiation.
 * Records the final queue counts, creates the ifnet, configures the
 * switch, filters and queue manager, determines link state (polling
 * via the admin timer if FW hasn't finished PHY config), sets up
 * SR-IOV and optional iWARP, and finally starts the admin callout.
 * In FW recovery mode only a minimal subset runs.
 */
686 ixl_if_attach_post(if_ctx_t ctx)
693 enum i40e_status_code status;
695 dev = iflib_get_dev(ctx);
696 pf = iflib_get_softc(ctx);
698 INIT_DBG_DEV(dev, "begin");
701 vsi->ifp = iflib_get_ifp(ctx);
704 /* Save off determined number of queues for interface */
705 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
706 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
708 /* Setup OS network interface / ifnet */
709 if (ixl_setup_interface(dev, pf)) {
710 device_printf(dev, "interface setup failed!\n");
/* Recovery mode: only AQ interrupt + recovery sysctls + admin timer, then done. */
715 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
716 /* Keep admin queue interrupts active while driver is loaded */
717 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
718 ixl_configure_intr0_msix(pf);
719 ixl_enable_intr0(hw);
722 ixl_add_sysctls_recovery_mode(pf);
724 /* Start the admin timer */
725 mtx_lock(&pf->admin_mtx);
726 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
727 mtx_unlock(&pf->admin_mtx);
731 error = ixl_switch_config(pf);
733 device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
738 /* Add protocol filters to list */
739 ixl_init_filters(vsi);
741 /* Init queue allocation manager */
742 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
744 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
748 /* reserve a contiguous allocation for the PF's VSI */
749 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
750 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
752 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
756 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
757 pf->qtag.num_allocated, pf->qtag.num_active);
759 /* Determine link state */
760 error = ixl_attach_get_link_status(pf);
764 /* Limit PHY interrupts to link, autoneg, and modules failure */
765 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
768 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
769 " aq_err %s\n", i40e_stat_str(hw, status),
770 i40e_aq_str(hw, hw->aq.asq_last_status));
774 /* Get the bus configuration and set the shared code */
775 ixl_get_bus_info(pf);
777 /* Keep admin queue interrupts active while driver is loaded */
778 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
779 ixl_configure_intr0_msix(pf);
780 ixl_enable_intr0(hw);
783 /* Set initial advertised speed sysctl value */
784 ixl_set_initial_advertised_speeds(pf);
786 /* Initialize statistics & add sysctls */
787 ixl_add_device_sysctls(pf);
788 ixl_pf_reset_stats(pf);
789 ixl_update_stats_counters(pf);
790 ixl_add_hw_stats(pf);
793 * Driver may have been reloaded. Ensure that the link state
794 * is consistent with current settings.
796 ixl_set_link(pf, ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN));
798 hw->phy.get_link_info = true;
799 status = i40e_get_link_status(hw, &pf->link_up);
800 if (status != I40E_SUCCESS) {
802 "%s get link status, status: %s aq_err=%s\n",
803 __func__, i40e_stat_str(hw, status),
804 i40e_aq_str(hw, hw->aq.asq_last_status));
806 * Most probably FW has not finished configuring PHY.
807 * Retry periodically in a timer callback.
809 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
810 pf->link_poll_start = getsbinuptime();
812 ixl_update_link_status(pf);
815 ixl_initialize_sriov(pf);
/* Optional iWARP attach: requires HW capability, the tunable, and spare MSI-X vectors. */
819 if (hw->func_caps.iwarp && ixl_enable_iwarp) {
820 pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
821 if (pf->iw_enabled) {
822 error = ixl_iw_pf_attach(pf);
825 "interfacing to iWARP driver failed: %d\n",
829 device_printf(dev, "iWARP ready\n");
831 device_printf(dev, "iWARP disabled on this device "
832 "(no MSI-X vectors)\n");
834 pf->iw_enabled = false;
835 device_printf(dev, "The device is not iWARP enabled\n");
838 /* Start the admin timer */
839 mtx_lock(&pf->admin_mtx);
840 callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
841 mtx_unlock(&pf->admin_mtx);
843 INIT_DBG_DEV(dev, "end");
847 INIT_DEBUGOUT("end: error %d", error);
848 /* ixl_if_detach() is called on error from this */
853 * XXX: iflib always ignores the return value of detach()
854 * -> This means that this isn't allowed to fail
/*
 * Teardown in reverse of attach: stop the admin callout, detach iWARP
 * (aborting if it is busy), remove media, shut down the HMC and admin
 * queue, then release the queue manager, PCI resources and filters.
 */
857 ixl_if_detach(if_ctx_t ctx)
859 struct ixl_pf *pf = iflib_get_softc(ctx);
860 struct ixl_vsi *vsi = &pf->vsi;
861 struct i40e_hw *hw = &pf->hw;
862 device_t dev = pf->dev;
863 enum i40e_status_code status;
868 INIT_DBG_DEV(dev, "begin");
870 /* Stop the admin timer */
871 mtx_lock(&pf->admin_mtx);
872 callout_stop(&pf->admin_timer);
873 mtx_unlock(&pf->admin_mtx);
874 mtx_destroy(&pf->admin_mtx);
877 if (ixl_enable_iwarp && pf->iw_enabled) {
878 error = ixl_iw_pf_detach(pf);
879 if (error == EBUSY) {
880 device_printf(dev, "iwarp in use; stop it first.\n");
885 /* Remove all previously allocated media types */
886 ifmedia_removeall(vsi->media);
888 /* Shutdown LAN HMC */
889 ixl_shutdown_hmc(pf);
891 /* Shutdown admin queue */
892 ixl_disable_intr0(hw);
893 status = i40e_shutdown_adminq(hw);
896 "i40e_shutdown_adminq() failed with status %s\n",
897 i40e_stat_str(hw, status));
899 ixl_pf_qmgr_destroy(&pf->qmgr);
900 ixl_free_pci_resources(pf);
901 ixl_free_filters(&vsi->ftl);
902 INIT_DBG_DEV(dev, "end");
/* iflib shutdown hook — currently a stub (see TODOs below). */
907 ixl_if_shutdown(if_ctx_t ctx)
911 INIT_DEBUGOUT("ixl_if_shutdown: begin");
913 /* TODO: Call ixl_if_stop()? */
915 /* TODO: Then setup low power mode */
/* iflib suspend hook — currently a stub (see TODOs below). */
921 ixl_if_suspend(if_ctx_t ctx)
925 INIT_DEBUGOUT("ixl_if_suspend: begin");
927 /* TODO: Call ixl_if_stop()? */
929 /* TODO: Then setup low power mode */
/* iflib resume hook: re-initialize only if the interface was up. */
935 ixl_if_resume(if_ctx_t ctx)
937 if_t ifp = iflib_get_ifp(ctx);
939 INIT_DEBUGOUT("ixl_if_resume: begin");
941 /* Read & clear wake-up registers */
943 /* Required after D3->D0 transition */
944 if (if_getflags(ifp) & IFF_UP)
/*
 * iflib init (bring-up) path.  Recovers HW state if the admin queue
 * died, applies any locally-administered MAC address, programs the
 * VSI, interrupts, rings and filters, restores promiscuous mode, and
 * re-inits iWARP if enabled.  A no-op in FW recovery mode.
 */
951 ixl_if_init(if_ctx_t ctx)
953 struct ixl_pf *pf = iflib_get_softc(ctx);
954 struct ixl_vsi *vsi = &pf->vsi;
955 struct i40e_hw *hw = &pf->hw;
956 if_t ifp = iflib_get_ifp(ctx);
957 device_t dev = iflib_get_dev(ctx);
958 u8 tmpaddr[ETHER_ADDR_LEN];
961 if (IXL_PF_IN_RECOVERY_MODE(pf))
964 * If the aq is dead here, it probably means something outside of the driver
965 * did something to the adapter, like a PF reset.
966 * So, rebuild the driver's state here if that occurs.
968 if (!i40e_check_asq_alive(&pf->hw)) {
969 device_printf(dev, "Admin Queue is down; resetting...\n");
970 ixl_teardown_hw_structs(pf);
971 ixl_rebuild_hw_structs_after_reset(pf, false);
974 /* Get the latest mac address... User might use a LAA */
975 bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
976 if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
977 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
/* MAC changed: drop VLAN filters tied to the old address before writing the LAA. */
978 ixl_del_all_vlan_filters(vsi, hw->mac.addr);
979 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
980 ret = i40e_aq_mac_address_write(hw,
981 I40E_AQC_WRITE_TYPE_LAA_ONLY,
984 device_printf(dev, "LLA address change failed!!\n");
988 * New filters are configured by ixl_reconfigure_filters
989 * at the end of ixl_init_locked.
993 iflib_set_mac(ctx, hw->mac.addr);
995 /* Prepare the VSI: rings, hmc contexts, etc... */
996 if (ixl_initialize_vsi(vsi)) {
997 device_printf(dev, "initialize vsi failed!!\n");
1001 ixl_set_link(pf, true);
1003 /* Reconfigure multicast filters in HW */
1004 ixl_if_multi_set(ctx);
1009 /* Set up MSI-X routing and the ITR settings */
1010 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1011 ixl_configure_queue_intr_msix(pf);
1012 ixl_configure_itr(pf);
1014 ixl_configure_legacy(pf);
/* TX completion tracking differs by writeback mode (see ixl_setup_ssctx). */
1016 if (vsi->enable_head_writeback)
1017 ixl_init_tx_cidx(vsi);
1019 ixl_init_tx_rsqs(vsi);
1021 ixl_enable_rings(vsi);
1023 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1025 /* Re-add configure filters to HW */
1026 ixl_reconfigure_filters(vsi);
1028 /* Configure promiscuous mode */
1029 ixl_if_promisc_set(ctx, if_getflags(ifp));
1032 if (ixl_enable_iwarp && pf->iw_enabled) {
1033 ret = ixl_iw_pf_init(pf);
1036 "initialize iwarp failed, code %d\n", ret);
/*
 * iflib stop path: quiesce ring interrupts and rings, then drop link
 * unless the interface is merely being reconfigured (e.g. MTU change)
 * or LINK_ACTIVE_ON_DOWN is set.  A no-op in FW recovery mode.
 */
1042 ixl_if_stop(if_ctx_t ctx)
1044 struct ixl_pf *pf = iflib_get_softc(ctx);
1045 if_t ifp = iflib_get_ifp(ctx);
1046 struct ixl_vsi *vsi = &pf->vsi;
1048 INIT_DEBUGOUT("ixl_if_stop: begin\n");
1050 if (IXL_PF_IN_RECOVERY_MODE(pf))
1053 // TODO: This may need to be reworked
1055 /* Stop iWARP device */
1056 if (ixl_enable_iwarp && pf->iw_enabled)
1060 ixl_disable_rings_intr(vsi);
1061 ixl_disable_rings(pf, vsi, &pf->qtag);
1064 * Don't set link state if only reconfiguring
1065 * e.g. on MTU change.
1067 if ((if_getflags(ifp) & IFF_UP) == 0 &&
1068 !ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN))
1069 ixl_set_link(pf, false);
/*
 * Allocate and wire MSI-X vectors: vector 0 for the admin queue, then
 * one RXTX vector per RX queue set starting at vector 1.  TX queues
 * are soft-IRQs bound round-robin onto the RX vectors.  On RX vector
 * failure, trims num_rx_queues and frees everything allocated so far.
 */
1073 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
1075 struct ixl_pf *pf = iflib_get_softc(ctx);
1076 struct ixl_vsi *vsi = &pf->vsi;
1077 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1078 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1079 int err, i, rid, vector = 0;
1082 MPASS(vsi->shared->isc_nrxqsets > 0);
1083 MPASS(vsi->shared->isc_ntxqsets > 0);
1085 /* Admin Que must use vector 0*/
1087 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
1088 ixl_msix_adminq, pf, 0, "aq");
1090 iflib_irq_free(ctx, &vsi->irq);
1091 device_printf(iflib_get_dev(ctx),
1092 "Failed to register Admin Que handler");
1097 /* Create soft IRQ for handling VFLRs */
1098 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
1101 /* Now set up the stations */
1102 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
1105 snprintf(buf, sizeof(buf), "rxq%d", i);
1106 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1107 IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
1108 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
1109 * what's expected in the iflib context? */
1111 device_printf(iflib_get_dev(ctx),
1112 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
1113 vsi->num_rx_queues = i + 1;
1116 rx_que->msix = vector;
1119 bzero(buf, sizeof(buf));
1121 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
1122 snprintf(buf, sizeof(buf), "txq%d", i);
1123 iflib_softirq_alloc_generic(ctx,
1124 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
1125 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1127 /* TODO: Maybe call a strategy function for this to figure out which
1128 * interrupts to map Tx queues to. I don't know if there's an immediately
1129 * better way than this other than a user-supplied map, though. */
1130 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* Failure path: release the admin vector and every RX queue vector allocated above. */
1135 iflib_irq_free(ctx, &vsi->irq);
1136 rx_que = vsi->rx_queues;
1137 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1138 iflib_irq_free(ctx, &rx_que->que_irq);
1143 * Enable all interrupts
1146 * iflib_init_locked, after ixl_if_init()
/*
 * ixl_if_enable_intr: iflib .ifdi_intr_enable handler.
 * Unmasks the "other cause"/adminq interrupt (intr0), then unmasks each
 * RX queue's interrupt.
 */
1149 ixl_if_enable_intr(if_ctx_t ctx)
1151 struct ixl_pf *pf = iflib_get_softc(ctx);
1152 struct ixl_vsi *vsi = &pf->vsi;
1153 struct i40e_hw *hw = vsi->hw;
1154 struct ixl_rx_queue *que = vsi->rx_queues;
1156 ixl_enable_intr0(hw);
1157 /* Enable queue interrupts */
1158 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1159 /* TODO: Queue index parameter is probably wrong */
/* NOTE(review): uses the ring index (rxr.me) here, whereas the
 * disable/enable paths elsewhere use (msix - 1) — the in-tree TODO
 * above suggests this inconsistency is known; verify before changing. */
1160 ixl_enable_queue(hw, que->rxr.me);
1164 * Disable queue interrupts
1166 * Other interrupt causes need to remain active.
/*
 * ixl_if_disable_intr: iflib .ifdi_intr_disable handler.
 * MSI-X: masks each RX queue interrupt individually (adminq vector 0
 * stays enabled). Legacy/MSI: points PFINT_LNKLST0 at the "end of list"
 * sentinel (0x7FF) so no queue can raise the shared interrupt.
 */
1169 ixl_if_disable_intr(if_ctx_t ctx)
1171 struct ixl_pf *pf = iflib_get_softc(ctx);
1172 struct ixl_vsi *vsi = &pf->vsi;
1173 struct i40e_hw *hw = vsi->hw;
1174 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1176 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1177 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1178 ixl_disable_queue(hw, rx_que->msix - 1);
1180 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1181 // stops queues from triggering interrupts
1182 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/*
 * ixl_if_rx_queue_intr_enable: iflib .ifdi_rx_queue_intr_enable handler.
 * Re-arms the interrupt for a single RX queue after a polling pass.
 * The hardware queue-interrupt index is the MSI-X vector minus one
 * (vector 0 is the adminq).
 */
1187 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1189 struct ixl_pf *pf = iflib_get_softc(ctx);
1190 struct ixl_vsi *vsi = &pf->vsi;
1191 struct i40e_hw *hw = vsi->hw;
1192 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
1194 ixl_enable_queue(hw, rx_que->msix - 1);
/*
 * ixl_if_tx_queue_intr_enable: iflib .ifdi_tx_queue_intr_enable handler.
 * Re-arms the interrupt associated with a TX queue; tx_que->msix was
 * mapped onto an RX vector at assign time, so (msix - 1) addresses the
 * shared hardware queue interrupt.
 */
1199 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1201 struct ixl_pf *pf = iflib_get_softc(ctx);
1202 struct ixl_vsi *vsi = &pf->vsi;
1203 struct i40e_hw *hw = vsi->hw;
1204 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1206 ixl_enable_queue(hw, tx_que->msix - 1);
/*
 * ixl_if_tx_queues_alloc: iflib .ifdi_tx_queues_alloc handler.
 * Allocates the driver-side TX queue array and, per queue, wires up the
 * descriptor ring memory that iflib already DMA-allocated (vaddrs/paddrs).
 * When head writeback is disabled, also allocates the report-status
 * (tx_rsq) array used for descriptor completion tracking.
 * On any failure, ixl_if_queues_free() unwinds partial allocations.
 */
1211 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1213 struct ixl_pf *pf = iflib_get_softc(ctx);
1214 struct ixl_vsi *vsi = &pf->vsi;
1215 if_softc_ctx_t scctx = vsi->shared;
1216 struct ixl_tx_queue *que;
1217 int i, j, error = 0;
1219 MPASS(scctx->isc_ntxqsets > 0);
1221 MPASS(scctx->isc_ntxqsets == ntxqsets);
1223 /* Allocate queue structure memory */
1224 if (!(vsi->tx_queues =
1225 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1226 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1230 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1231 struct tx_ring *txr = &que->txr;
1236 if (!vsi->enable_head_writeback) {
1237 /* Allocate report status array */
1238 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1239 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1243 /* Init report status array */
1244 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1245 txr->tx_rsq[j] = QIDX_INVALID;
1247 /* get the virtual and physical address of the hardware queues */
1248 txr->tail = I40E_QTX_TAIL(txr->me);
/* ntxqs rings per qset; this driver uses the first (index i * ntxqs). */
1249 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1250 txr->tx_paddr = paddrs[i * ntxqs];
/* Error path: release everything allocated so far. */
1256 ixl_if_queues_free(ctx);
/*
 * ixl_if_rx_queues_alloc: iflib .ifdi_rx_queues_alloc handler.
 * Mirror of the TX path: allocates the driver-side RX queue array and
 * attaches each queue to its iflib-provided descriptor ring memory.
 * On failure, ixl_if_queues_free() unwinds partial allocations.
 */
1261 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1263 struct ixl_pf *pf = iflib_get_softc(ctx);
1264 struct ixl_vsi *vsi = &pf->vsi;
1265 struct ixl_rx_queue *que;
1269 if_softc_ctx_t scctx = vsi->shared;
1270 MPASS(scctx->isc_nrxqsets > 0);
1272 MPASS(scctx->isc_nrxqsets == nrxqsets);
1275 /* Allocate queue structure memory */
1276 if (!(vsi->rx_queues =
1277 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1278 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1279 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1284 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1285 struct rx_ring *rxr = &que->rxr;
1290 /* get the virtual and physical address of the hardware queues */
1291 rxr->tail = I40E_QRX_TAIL(rxr->me);
1292 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1293 rxr->rx_paddr = paddrs[i * nrxqs];
/* Error path: release everything allocated so far. */
1299 ixl_if_queues_free(ctx);
/*
 * ixl_if_queues_free: iflib .ifdi_queues_free handler.
 * Frees the per-queue report-status arrays (only allocated when head
 * writeback is off), then the TX/RX queue arrays themselves, and finally
 * the VSI sysctl context (skipped in recovery mode, where it was
 * presumably never created). Safe to call on partially-allocated state:
 * every pointer is NULL-checked and cleared.
 */
1304 ixl_if_queues_free(if_ctx_t ctx)
1306 struct ixl_pf *pf = iflib_get_softc(ctx);
1307 struct ixl_vsi *vsi = &pf->vsi;
1309 if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1310 struct ixl_tx_queue *que;
1313 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1314 struct tx_ring *txr = &que->txr;
1315 if (txr->tx_rsq != NULL) {
1316 free(txr->tx_rsq, M_IXL);
1322 if (vsi->tx_queues != NULL) {
1323 free(vsi->tx_queues, M_IXL);
1324 vsi->tx_queues = NULL;
1326 if (vsi->rx_queues != NULL) {
1327 free(vsi->rx_queues, M_IXL);
1328 vsi->rx_queues = NULL;
1331 if (!IXL_PF_IN_RECOVERY_MODE(pf))
1332 sysctl_ctx_free(&vsi->sysctl_ctx);
/*
 * ixl_update_link_status: propagate the PF's current link state.
 * Detects up/down edge transitions via vsi->link_active, reports them to
 * iflib (with baudrate on link-up), logs the change, and broadcasts the
 * new state to any VFs.
 */
1336 ixl_update_link_status(struct ixl_pf *pf)
1338 struct ixl_vsi *vsi = &pf->vsi;
1339 struct i40e_hw *hw = &pf->hw;
/* Link-up edge: only act when state actually changed. */
1343 if (vsi->link_active == FALSE) {
1344 vsi->link_active = TRUE;
1345 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1346 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1347 ixl_link_up_msg(pf);
/* Let VFs know the PF link state changed. */
1349 ixl_broadcast_link_state(pf);
1352 } else { /* Link down */
1353 if (vsi->link_active == TRUE) {
1354 vsi->link_active = FALSE;
1355 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1357 ixl_broadcast_link_state(pf);
/*
 * ixl_handle_lan_overflow_event: decode and log a LAN queue overflow
 * Admin Queue event. param0 carries the global RX queue index (extracted
 * with the RUPTQ_RXQNUM mask/shift) and param1 carries the raw QTX_CTL
 * register value. Diagnostic only — no corrective action is taken here.
 */
1364 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1366 device_t dev = pf->dev;
1367 u32 rxq_idx, qtx_ctl;
1369 rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1370 I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1371 qtx_ctl = e->desc.params.external.param1;
1373 device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1374 device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
/*
 * ixl_process_adminq: drain and dispatch Admin Queue events.
 * Allocates a scratch event buffer, then loops on
 * i40e_clean_arq_element(), dispatching by opcode: link status changes,
 * VF mailbox messages, and LAN overflow events. The loop is bounded by
 * IXL_ADM_LIMIT to avoid monopolizing the admin task. On exit the
 * adminq interrupt cause is re-enabled in PFINT_ICR0_ENA.
 * @pending: out parameter — number of ARQ events still queued.
 */
1378 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1380 enum i40e_status_code status = I40E_SUCCESS;
1381 struct i40e_arq_event_info event;
1382 struct i40e_hw *hw = &pf->hw;
1383 device_t dev = pf->dev;
1387 event.buf_len = IXL_AQ_BUF_SZ;
1388 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1389 if (!event.msg_buf) {
1390 device_printf(dev, "%s: Unable to allocate memory for Admin"
1391 " Queue event!\n", __func__);
1395 /* clean and process any events */
1397 status = i40e_clean_arq_element(hw, &event, pending);
1400 opcode = LE16_TO_CPU(event.desc.opcode);
1401 ixl_dbg(pf, IXL_DBG_AQ,
1402 "Admin Queue event: %#06x\n", opcode);
1404 case i40e_aqc_opc_get_link_status:
1405 ixl_link_event(pf, &event);
1407 case i40e_aqc_opc_send_msg_to_pf:
/* VF-to-PF mailbox traffic (SR-IOV). */
1409 ixl_handle_vf_msg(pf, &event);
1413 * This should only occur on no-drop queues, which
1414 * aren't currently configured.
1416 case i40e_aqc_opc_event_lan_overflow:
1417 ixl_handle_lan_overflow_event(pf, &event);
1422 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1424 free(event.msg_buf, M_IXL);
1426 /* Re-enable admin queue interrupt cause */
1427 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1428 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1429 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_if_update_admin_status: iflib .ifdi_update_admin_status handler
 * (the deferred admin task). Completes a pending EMP reset if one is in
 * progress, bails out if the Admin Send Queue is not alive (e.g. bad FW
 * after reset), services pending MDD events, drains the admin queue, and
 * refreshes link state. If events remain it reschedules itself;
 * otherwise it re-enables the intr0 (adminq/other-cause) interrupt.
 */
1435 ixl_if_update_admin_status(if_ctx_t ctx)
1437 struct ixl_pf *pf = iflib_get_softc(ctx);
1438 struct i40e_hw *hw = &pf->hw;
1441 if (IXL_PF_IS_RESETTING(pf))
1442 ixl_handle_empr_reset(pf);
1445 * Admin Queue is shut down while handling reset.
1446 * Don't proceed if it hasn't been re-initialized
1447 * e.g due to an issue with new FW.
1449 if (!i40e_check_asq_alive(&pf->hw))
1452 if (ixl_test_state(&pf->state, IXL_STATE_MDD_PENDING))
1453 ixl_handle_mdd_event(pf);
1455 ixl_process_adminq(pf, &pending);
1456 ixl_update_link_status(pf);
1459 * If there are still messages to process, reschedule ourselves.
1460 * Otherwise, re-enable our interrupt and go to sleep.
1463 iflib_admin_intr_deferred(ctx);
1465 ixl_enable_intr0(hw);
/*
 * ixl_if_multi_set: iflib .ifdi_multi_set handler — sync HW multicast
 * filters with the ifnet's current multicast list. Stale filters are
 * removed first; if the list size reaches MAX_MULTICAST_ADDR the VSI is
 * switched to multicast-promiscuous and all individual multicast filters
 * are dropped instead.
 */
1469 ixl_if_multi_set(if_ctx_t ctx)
1471 struct ixl_pf *pf = iflib_get_softc(ctx);
1472 struct ixl_vsi *vsi = &pf->vsi;
1473 struct i40e_hw *hw = vsi->hw;
1476 IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1478 /* Delete filters for removed multicast addresses */
1479 ixl_del_multi(vsi, false);
1481 mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
/* Too many groups for exact filtering: fall back to mcast promisc
 * and purge every individual multicast filter (the "true" flag). */
1482 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1483 i40e_aq_set_vsi_multicast_promiscuous(hw,
1484 vsi->seid, TRUE, NULL);
1485 ixl_del_multi(vsi, true);
1490 IOCTL_DEBUGOUT("ixl_if_multi_set: end");
/*
 * ixl_if_mtu_set: iflib .ifdi_mtu_set handler.
 * Rejects MTUs whose resulting frame (MTU + Ethernet header + CRC +
 * one VLAN tag) would exceed IXL_MAX_FRAME; otherwise records the new
 * maximum frame size in the shared softc context for iflib to apply.
 */
1494 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1496 struct ixl_pf *pf = iflib_get_softc(ctx);
1497 struct ixl_vsi *vsi = &pf->vsi;
1499 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1500 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1501 ETHER_VLAN_ENCAP_LEN)
1504 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1505 ETHER_VLAN_ENCAP_LEN;
/*
 * ixl_if_media_status: iflib .ifdi_media_status handler.
 * Fills in ifmr with the current link status and active media: maps the
 * firmware-reported PHY type (hw->phy.link_info.phy_type) to the
 * corresponding ifmedia IFM_* subtype, then appends flow-control pause
 * flags from the autonegotiation info. Hardware is always full-duplex.
 */
1511 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1513 struct ixl_pf *pf = iflib_get_softc(ctx);
1514 struct i40e_hw *hw = &pf->hw;
1516 INIT_DEBUGOUT("ixl_media_status: begin");
1518 ifmr->ifm_status = IFM_AVALID;
1519 ifmr->ifm_active = IFM_ETHER;
1525 ifmr->ifm_status |= IFM_ACTIVE;
1526 /* Hardware is always full-duplex */
1527 ifmr->ifm_active |= IFM_FDX;
/* PHY type -> ifmedia subtype, grouped roughly by speed class. */
1529 switch (hw->phy.link_info.phy_type) {
/* 100M/1G copper and optics */
1531 case I40E_PHY_TYPE_100BASE_TX:
1532 ifmr->ifm_active |= IFM_100_TX;
1535 case I40E_PHY_TYPE_1000BASE_T:
1536 ifmr->ifm_active |= IFM_1000_T;
1538 case I40E_PHY_TYPE_1000BASE_SX:
1539 ifmr->ifm_active |= IFM_1000_SX;
1541 case I40E_PHY_TYPE_1000BASE_LX:
1542 ifmr->ifm_active |= IFM_1000_LX;
1544 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1545 ifmr->ifm_active |= IFM_1000_T;
/* 2.5G / 5G NBASE-T */
1548 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1549 ifmr->ifm_active |= IFM_2500_T;
1552 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1553 ifmr->ifm_active |= IFM_5000_T;
/* 10G media */
1556 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1557 ifmr->ifm_active |= IFM_10G_TWINAX;
1559 case I40E_PHY_TYPE_10GBASE_SR:
1560 ifmr->ifm_active |= IFM_10G_SR;
1562 case I40E_PHY_TYPE_10GBASE_LR:
1563 ifmr->ifm_active |= IFM_10G_LR;
1565 case I40E_PHY_TYPE_10GBASE_T:
1566 ifmr->ifm_active |= IFM_10G_T;
1568 case I40E_PHY_TYPE_XAUI:
1569 case I40E_PHY_TYPE_XFI:
1570 ifmr->ifm_active |= IFM_10G_TWINAX;
1572 case I40E_PHY_TYPE_10GBASE_AOC:
1573 ifmr->ifm_active |= IFM_10G_AOC;
/* 25G media */
1576 case I40E_PHY_TYPE_25GBASE_KR:
1577 ifmr->ifm_active |= IFM_25G_KR;
1579 case I40E_PHY_TYPE_25GBASE_CR:
1580 ifmr->ifm_active |= IFM_25G_CR;
1582 case I40E_PHY_TYPE_25GBASE_SR:
1583 ifmr->ifm_active |= IFM_25G_SR;
1585 case I40E_PHY_TYPE_25GBASE_LR:
1586 ifmr->ifm_active |= IFM_25G_LR;
1588 case I40E_PHY_TYPE_25GBASE_AOC:
1589 ifmr->ifm_active |= IFM_25G_AOC;
1591 case I40E_PHY_TYPE_25GBASE_ACC:
1592 ifmr->ifm_active |= IFM_25G_ACC;
/* 40G media */
1595 case I40E_PHY_TYPE_40GBASE_CR4:
1596 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1597 ifmr->ifm_active |= IFM_40G_CR4;
1599 case I40E_PHY_TYPE_40GBASE_SR4:
1600 ifmr->ifm_active |= IFM_40G_SR4;
1602 case I40E_PHY_TYPE_40GBASE_LR4:
1603 ifmr->ifm_active |= IFM_40G_LR4;
1605 case I40E_PHY_TYPE_XLAUI:
1606 ifmr->ifm_active |= IFM_OTHER;
/* Backplane / SGMII */
1608 case I40E_PHY_TYPE_1000BASE_KX:
1609 ifmr->ifm_active |= IFM_1000_KX;
1611 case I40E_PHY_TYPE_SGMII:
1612 ifmr->ifm_active |= IFM_1000_SGMII;
1614 /* ERJ: What's the difference between these? */
1615 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1616 case I40E_PHY_TYPE_10GBASE_CR1:
1617 ifmr->ifm_active |= IFM_10G_CR1;
1619 case I40E_PHY_TYPE_10GBASE_KX4:
1620 ifmr->ifm_active |= IFM_10G_KX4;
1622 case I40E_PHY_TYPE_10GBASE_KR:
1623 ifmr->ifm_active |= IFM_10G_KR;
1625 case I40E_PHY_TYPE_SFI:
1626 ifmr->ifm_active |= IFM_10G_SFI;
1628 /* Our single 20G media type */
1629 case I40E_PHY_TYPE_20GBASE_KR2:
1630 ifmr->ifm_active |= IFM_20G_KR2;
1632 case I40E_PHY_TYPE_40GBASE_KR4:
1633 ifmr->ifm_active |= IFM_40G_KR4;
1635 case I40E_PHY_TYPE_XLPPI:
1636 case I40E_PHY_TYPE_40GBASE_AOC:
1637 ifmr->ifm_active |= IFM_40G_XLPPI;
1639 /* Unknown to driver */
1641 ifmr->ifm_active |= IFM_UNKNOWN;
1644 /* Report flow control status as well */
1645 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1646 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1647 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1648 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * ixl_if_media_change: iflib .ifdi_media_change handler.
 * Manual media selection is not supported by this driver; anything other
 * than an Ethernet media word is rejected, and valid requests just log a
 * "not supported" message.
 */
1652 ixl_if_media_change(if_ctx_t ctx)
1654 struct ifmedia *ifm = iflib_get_media(ctx);
1656 INIT_DEBUGOUT("ixl_media_change: begin");
1658 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1661 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * ixl_if_promisc_set: iflib .ifdi_promisc_set handler.
 * Translates ifnet flags into VSI promiscuous settings via the Admin
 * Queue: IFF_PROMISC enables unicast (and, presumably, multicast)
 * promiscuous; IFF_ALLMULTI — or a multicast list too large to filter
 * exactly — enables multicast promiscuous only.
 */
1666 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1668 struct ixl_pf *pf = iflib_get_softc(ctx);
1669 struct ixl_vsi *vsi = &pf->vsi;
1670 if_t ifp = iflib_get_ifp(ctx);
1671 struct i40e_hw *hw = vsi->hw;
1673 bool uni = FALSE, multi = FALSE;
1675 if (flags & IFF_PROMISC)
1677 else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1681 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1682 vsi->seid, uni, NULL, true);
1685 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1686 vsi->seid, multi, NULL);
/*
 * ixl_if_timer: iflib .ifdi_timer handler (periodic per-queue callback).
 * Only refreshes the PF statistics counters; @qid is effectively unused
 * in the visible body.
 */
1691 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1693 struct ixl_pf *pf = iflib_get_softc(ctx);
1698 ixl_update_stats_counters(pf);
/*
 * ixl_if_vlan_register: iflib .ifdi_vlan_register handler.
 * Records the VLAN in the software bitmap, then — only when VLAN HW
 * filtering is enabled — installs a MAC+VLAN filter. When the HW filter
 * table is exhausted (num_vlans == IXL_MAX_VLAN_FILTERS), falls back to
 * a single MAC/IXL_VLAN_ANY filter so traffic for all registered VLANs
 * is still received (HW VLAN filtering effectively disabled).
 */
1702 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1704 struct ixl_pf *pf = iflib_get_softc(ctx);
1705 struct ixl_vsi *vsi = &pf->vsi;
1706 struct i40e_hw *hw = vsi->hw;
1707 if_t ifp = iflib_get_ifp(ctx);
1709 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1713 * Keep track of registered VLANS to know what
1714 * filters have to be configured when VLAN_HWFILTER
1715 * capability is enabled.
1718 bit_set(vsi->vlans_map, vtag);
1720 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1723 if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1724 ixl_add_filter(vsi, hw->mac.addr, vtag);
1725 else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1727 * There is not enough HW resources to add filters
1728 * for all registered VLANs. Re-configure filtering
1729 * to allow reception of all expected traffic.
1731 device_printf(vsi->dev,
1732 "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
1733 ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1734 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/*
 * ixl_if_vlan_unregister: iflib .ifdi_vlan_unregister handler — inverse
 * of ixl_if_vlan_register. Clears the VLAN from the software bitmap and,
 * when VLAN HW filtering is enabled, removes its filter. If this drop
 * brings the VLAN count back within HW capacity, the catch-all
 * IXL_VLAN_ANY filter is replaced with exact per-VLAN filters again.
 */
1739 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1741 struct ixl_pf *pf = iflib_get_softc(ctx);
1742 struct ixl_vsi *vsi = &pf->vsi;
1743 struct i40e_hw *hw = vsi->hw;
1744 if_t ifp = iflib_get_ifp(ctx);
1746 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1750 bit_clear(vsi->vlans_map, vtag);
1752 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1755 /* One filter is used for untagged frames */
1756 if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
1757 ixl_del_filter(vsi, hw->mac.addr, vtag)
/*
 * ixl_if_get_counter: iflib .ifdi_get_counter handler.
 * Returns the requested interface statistic from the VSI's cached
 * counters (refreshed elsewhere by ixl_update_stats_counters); anything
 * not tracked per-VSI falls through to if_get_counter_default().
 * Collisions are reported as zero — impossible on this media.
 */
1765 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1767 struct ixl_pf *pf = iflib_get_softc(ctx);
1768 struct ixl_vsi *vsi = &pf->vsi;
1769 if_t ifp = iflib_get_ifp(ctx);
1772 case IFCOUNTER_IPACKETS:
1773 return (vsi->ipackets);
1774 case IFCOUNTER_IERRORS:
1775 return (vsi->ierrors);
1776 case IFCOUNTER_OPACKETS:
1777 return (vsi->opackets);
1778 case IFCOUNTER_OERRORS:
1779 return (vsi->oerrors);
1780 case IFCOUNTER_COLLISIONS:
1781 /* Collisions are by standard impossible in 40G/10G Ethernet */
1783 case IFCOUNTER_IBYTES:
1784 return (vsi->ibytes);
1785 case IFCOUNTER_OBYTES:
1786 return (vsi->obytes);
1787 case IFCOUNTER_IMCASTS:
1788 return (vsi->imcasts);
1789 case IFCOUNTER_OMCASTS:
1790 return (vsi->omcasts);
1791 case IFCOUNTER_IQDROPS:
1792 return (vsi->iqdrops);
1793 case IFCOUNTER_OQDROPS:
1794 return (vsi->oqdrops);
1795 case IFCOUNTER_NOPROTO:
1796 return (vsi->noproto);
1798 return (if_get_counter_default(ifp, cnt));
/*
 * ixl_if_vflr_handle: iflib .ifdi_vflr_handle handler — service a VF
 * Function Level Reset by delegating to the IOV code's ixl_handle_vflr().
 */
1804 ixl_if_vflr_handle(if_ctx_t ctx)
1806 struct ixl_pf *pf = iflib_get_softc(ctx);
1808 ixl_handle_vflr(pf);
/*
 * ixl_if_i2c_req: iflib .ifdi_i2c_req handler — satisfy an SFP/QSFP
 * module EEPROM read (SIOCGI2C) one byte at a time through the PF's
 * configured read_i2c_byte method. Fails early if no I2C access method
 * was set up; aborts on the first byte-read error.
 */
1813 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1815 struct ixl_pf *pf = iflib_get_softc(ctx);
1817 if (pf->read_i2c_byte == NULL)
1820 for (int i = 0; i < req->len; i++)
1821 if (pf->read_i2c_byte(pf, req->offset + i,
1822 req->dev_addr, &req->data[i]))
/*
 * ixl_if_priv_ioctl: iflib .ifdi_priv_ioctl handler for driver-private
 * SIOC[GS]DRVSPEC requests. Currently handles only the I40E_NVM_ACCESS
 * command (NVM update). iflib forwards these ioctls without privilege
 * checks, so priv_check(PRIV_DRIVER) is performed here before the NVM
 * command is executed.
 */
1828 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1830 struct ixl_pf *pf = iflib_get_softc(ctx);
1831 struct ifdrv *ifd = (struct ifdrv *)data;
1835 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1836 * performing privilege checks. It is important that this function
1837 * perform the necessary checks for commands which should only be
1838 * executed by privileged threads.
1844 /* NVM update command */
1845 if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1846 error = priv_check(curthread, PRIV_DRIVER);
1849 error = ixl_handle_nvmupd_cmd(pf, ifd);
1861 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1862 * @ctx: iflib context
1863 * @event: event code to check
1865 * Defaults to returning false for every event.
1867 * @returns true if iflib needs to reinit the interface, false otherwise
/* Only IFLIB_RESTART_VLAN_CONFIG is special-cased in the visible body;
 * all other events take the default (false) path. */
1870 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1873 case IFLIB_RESTART_VLAN_CONFIG:
1880 * Sanity check and save off tunable values.
1883 ixl_save_pf_tunables(struct ixl_pf *pf)
1885 device_t dev = pf->dev;
1887 /* Save tunable information */
1889 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1892 pf->recovery_mode = ixl_debug_recovery_mode;
1894 pf->dbg_mask = ixl_core_debug_mask;
1895 pf->hw.debug_mask = ixl_shared_debug_mask;
1896 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1897 pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1899 pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1900 pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1903 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1904 pf->i2c_access_method = 0;
1906 pf->i2c_access_method = ixl_i2c_access_method;
1908 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1909 device_printf(dev, "Invalid tx_itr value of %d set!\n",
1911 device_printf(dev, "tx_itr must be between %d and %d, "
1914 device_printf(dev, "Using default value of %d instead\n",
1916 pf->tx_itr = IXL_ITR_4K;
1918 pf->tx_itr = ixl_tx_itr;
1920 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1921 device_printf(dev, "Invalid rx_itr value of %d set!\n",
1923 device_printf(dev, "rx_itr must be between %d and %d, "
1926 device_printf(dev, "Using default value of %d instead\n",
1928 pf->rx_itr = IXL_ITR_8K;
1930 pf->rx_itr = ixl_rx_itr;
1933 if (ixl_flow_control != -1) {
1934 if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1936 "Invalid flow_control value of %d set!\n",
1939 "flow_control must be between %d and %d, "
1940 "inclusive\n", 0, 3);
1942 "Using default configuration instead\n");
1944 pf->fc = ixl_flow_control;