1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
40 #include "ixl_iw_int.h"
44 #include "ixl_pf_iov.h"
47 /*********************************************************************
49 *********************************************************************/
/* Driver version 2.1.0, stitched into "2.1.0-k" via __XSTRING token pasting. */
50 #define IXL_DRIVER_VERSION_MAJOR 2
51 #define IXL_DRIVER_VERSION_MINOR 1
52 #define IXL_DRIVER_VERSION_BUILD 0
54 #define IXL_DRIVER_VERSION_STRING \
55 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \
56 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \
57 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
59 /*********************************************************************
62 * Used by probe to select devices to load on
64 * ( Vendor ID, Device ID, Branding String )
65 *********************************************************************/
/*
 * PCI (vendor, device, branding) table consumed by iflib probe and by
 * IFLIB_PNP_INFO below.  Covers XL710/X710/XXV710 and X722 variants.
 * NOTE(review): the embedded original line numbers are non-contiguous —
 * the opening brace and the required all-zero terminator entry are not
 * visible in this chunk; confirm against the full source.
 */
67 static pci_vendor_info_t ixl_vendor_info_array[] =
69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 /* required last entry */
89 /*********************************************************************
91 *********************************************************************/
/*
 * Forward declarations.  The ixl_if_* functions implement the iflib
 * device-interface (ifdi_*) methods registered in ixl_if_methods below;
 * the remaining statics are local helpers.
 */
92 /*** IFLIB interface ***/
93 static void *ixl_register(device_t dev);
94 static int ixl_if_attach_pre(if_ctx_t ctx);
95 static int ixl_if_attach_post(if_ctx_t ctx);
96 static int ixl_if_detach(if_ctx_t ctx);
97 static int ixl_if_shutdown(if_ctx_t ctx);
98 static int ixl_if_suspend(if_ctx_t ctx);
99 static int ixl_if_resume(if_ctx_t ctx);
100 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
101 static void ixl_if_enable_intr(if_ctx_t ctx);
102 static void ixl_if_disable_intr(if_ctx_t ctx);
103 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
104 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
105 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
106 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
107 static void ixl_if_queues_free(if_ctx_t ctx);
108 static void ixl_if_update_admin_status(if_ctx_t ctx);
109 static void ixl_if_multi_set(if_ctx_t ctx);
110 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
111 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
112 static int ixl_if_media_change(if_ctx_t ctx);
113 static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
114 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
115 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
116 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
117 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
118 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
119 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
121 static void ixl_if_vflr_handle(if_ctx_t ctx);
125 static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
126 static void ixl_save_pf_tunables(struct ixl_pf *);
127 static int ixl_allocate_pci_resources(struct ixl_pf *);
129 /*********************************************************************
130 * FreeBSD Device Interface Entry Points
131 *********************************************************************/
/*
 * Newbus method table: everything except device_register is delegated to
 * the generic iflib entry points, which dispatch back into the ifdi_*
 * methods below.  NOTE(review): some lines of this chunk are elided
 * (DEVMETHOD_END / closing braces not visible here).
 */
133 static device_method_t ixl_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_register, ixl_register),
136 DEVMETHOD(device_probe, iflib_device_probe),
137 DEVMETHOD(device_attach, iflib_device_attach),
138 DEVMETHOD(device_detach, iflib_device_detach),
139 DEVMETHOD(device_shutdown, iflib_device_shutdown),
/* SR-IOV methods are also routed through iflib. */
141 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
142 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
143 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
148 static driver_t ixl_driver = {
149 "ixl", ixl_methods, sizeof(struct ixl_pf),
/* Register the driver on the pci bus and declare module dependencies. */
152 devclass_t ixl_devclass;
153 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
154 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
155 MODULE_VERSION(ixl, 3);
157 MODULE_DEPEND(ixl, pci, 1, 1, 1);
158 MODULE_DEPEND(ixl, ether, 1, 1, 1);
159 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
/*
 * iflib device-interface (ifdi) method table mapping iflib callbacks to
 * this driver's ixl_if_* implementations.  NOTE(review): DEVMETHOD_END
 * and the table's closing brace are elided from this chunk.
 */
161 static device_method_t ixl_if_methods[] = {
162 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
163 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
164 DEVMETHOD(ifdi_detach, ixl_if_detach),
165 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
166 DEVMETHOD(ifdi_suspend, ixl_if_suspend),
167 DEVMETHOD(ifdi_resume, ixl_if_resume),
168 DEVMETHOD(ifdi_init, ixl_if_init),
169 DEVMETHOD(ifdi_stop, ixl_if_stop),
170 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
171 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
172 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
173 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
174 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
175 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
176 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
177 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
178 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
179 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
180 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
181 DEVMETHOD(ifdi_media_status, ixl_if_media_status),
182 DEVMETHOD(ifdi_media_change, ixl_if_media_change),
183 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
184 DEVMETHOD(ifdi_timer, ixl_if_timer),
185 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
186 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
187 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
188 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
189 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
/* SR-IOV ifdi methods (iov_init/uninit/vf_add are defined in ixl_pf_iov). */
191 DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
192 DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
193 DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
194 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
201 static driver_t ixl_if_driver = {
202 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
/*
 * Loader tunables / read-only sysctls under hw.ixl.*.  Each TUNABLE_INT
 * reads the loader.conf value into the static, and the matching
 * SYSCTL_INT (CTLFLAG_RDTUN) exposes it read-only at runtime.
 */
206 ** TUNEABLE PARAMETERS:
209 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
210 "ixl driver parameters");
213 * Leave this on unless you need to send flow control
214 * frames (or other control frames) from software
216 static int ixl_enable_tx_fc_filter = 1;
217 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
218 &ixl_enable_tx_fc_filter);
219 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
220 &ixl_enable_tx_fc_filter, 0,
221 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
223 static int ixl_i2c_access_method = 0;
224 TUNABLE_INT("hw.ixl.i2c_access_method",
225 &ixl_i2c_access_method);
226 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
227 &ixl_i2c_access_method, 0,
228 IXL_SYSCTL_HELP_I2C_METHOD);
230 static int ixl_enable_vf_loopback = 1;
231 TUNABLE_INT("hw.ixl.enable_vf_loopback",
232 &ixl_enable_vf_loopback);
233 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
234 &ixl_enable_vf_loopback, 0,
235 IXL_SYSCTL_HELP_VF_LOOPBACK);
238 * Different method for processing TX descriptor
/* Selects head-writeback vs descriptor-writeback TX completion (see
 * ixl_txrx_hwb / ixl_txrx_dwb below). */
241 static int ixl_enable_head_writeback = 1;
242 TUNABLE_INT("hw.ixl.enable_head_writeback",
243 &ixl_enable_head_writeback);
244 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
245 &ixl_enable_head_writeback, 0,
246 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
248 static int ixl_core_debug_mask = 0;
249 TUNABLE_INT("hw.ixl.core_debug_mask",
250 &ixl_core_debug_mask);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
252 &ixl_core_debug_mask, 0,
253 "Display debug statements that are printed in non-shared code");
255 static int ixl_shared_debug_mask = 0;
256 TUNABLE_INT("hw.ixl.shared_debug_mask",
257 &ixl_shared_debug_mask);
258 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
259 &ixl_shared_debug_mask, 0,
260 "Display debug statements that are printed in shared code");
264 ** Controls for Interrupt Throttling
265 ** - true/false for dynamic adjustment
266 ** - default values for static ITR
268 static int ixl_dynamic_rx_itr = 0;
269 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
270 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
271 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
273 static int ixl_dynamic_tx_itr = 0;
274 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
275 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
276 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
279 static int ixl_rx_itr = IXL_ITR_8K;
280 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
281 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
282 &ixl_rx_itr, 0, "RX Interrupt Rate");
284 static int ixl_tx_itr = IXL_ITR_4K;
285 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
286 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
287 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* Non-static: ixl_enable_iwarp is read by the iWARP interface code. */
290 int ixl_enable_iwarp = 0;
291 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
292 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
293 &ixl_enable_iwarp, 0, "iWARP enabled");
295 #if __FreeBSD_version < 1100000
296 int ixl_limit_iwarp_msix = 1;
298 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
300 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
301 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
302 &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
/*
 * Shared iflib context template: DMA alignment/size limits, descriptor
 * ring bounds, and driver identity handed to iflib at registration.
 * NOTE(review): several initializer lines are elided from this chunk
 * (embedded line numbers 318-321, 327, 334-335 are missing).
 */
305 extern struct if_txrx ixl_txrx_hwb;
306 extern struct if_txrx ixl_txrx_dwb;
308 static struct if_shared_ctx ixl_sctx_init = {
309 .isc_magic = IFLIB_MAGIC,
310 .isc_q_align = PAGE_SIZE,
311 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
312 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
313 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
314 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
315 .isc_rx_maxsize = 16384,
316 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
317 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
/* One interrupt vector is reserved for admin-queue/other causes. */
322 .isc_admin_intrcnt = 1,
323 .isc_vendor_info = ixl_vendor_info_array,
324 .isc_driver_version = IXL_DRIVER_VERSION_STRING,
325 .isc_driver = &ixl_if_driver,
326 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
328 .isc_nrxd_min = {IXL_MIN_RING},
329 .isc_ntxd_min = {IXL_MIN_RING},
330 .isc_nrxd_max = {IXL_MAX_RING},
331 .isc_ntxd_max = {IXL_MAX_RING},
332 .isc_nrxd_default = {IXL_DEFAULT_RING},
333 .isc_ntxd_default = {IXL_DEFAULT_RING},
336 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
340 ixl_register(device_t dev)
/*
 * Map BAR0 and record PCI identification into the shared i40e_hw struct.
 * Returns 0 on success (per the failed-allocation printf a non-zero
 * error path exists, but it is elided from this chunk along with the
 * braces — embedded line numbers are non-contiguous).
 */
346 ixl_allocate_pci_resources(struct ixl_pf *pf)
348 	device_t dev = iflib_get_dev(pf->vsi.ctx);
349 struct i40e_hw *hw = &pf->hw;
/* Map BAR0 (device registers). */
354 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
357 if (!(pf->pci_mem)) {
358 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
362 /* Save off the PCI information */
363 hw->vendor_id = pci_get_vendor(dev);
364 hw->device_id = pci_get_device(dev);
365 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
366 hw->subsystem_vendor_id =
367 pci_read_config(dev, PCIR_SUBVEND_0, 2);
368 hw->subsystem_device_id =
369 pci_read_config(dev, PCIR_SUBDEV_0, 2);
371 hw->bus.device = pci_get_slot(dev);
372 hw->bus.func = pci_get_function(dev);
374 /* Save off register access information */
375 pf->osdep.mem_bus_space_tag =
376 rman_get_bustag(pf->pci_mem);
377 pf->osdep.mem_bus_space_handle =
378 rman_get_bushandle(pf->pci_mem);
379 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
380 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* Shared code accesses registers through osdep via hw_addr/back. */
383 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
384 pf->hw.back = &pf->osdep;
/*
 * First-stage iflib attach: PCI setup, PF reset, shared-code and admin
 * queue init, NVM/firmware version checks, LAN HMC setup, MAC address
 * retrieval, filter control, and filling in the iflib softc context.
 * NOTE(review): many statements (declarations of dev/pf/vsi/hw/error,
 * error-return lines, closing braces, the error-unwind labels) are
 * elided from this chunk; the visible tail (shutdown_lan_hmc/adminq/
 * free_pci_resources) looks like the error-unwind path.
 */
390 ixl_if_attach_pre(if_ctx_t ctx)
396 if_softc_ctx_t scctx;
397 struct i40e_filter_control_settings filter;
398 enum i40e_status_code status;
401 INIT_DBG_DEV(dev, "begin");
403 dev = iflib_get_dev(ctx);
404 pf = iflib_get_softc(ctx);
416 vsi->media = iflib_get_media(ctx);
417 vsi->shared = scctx = iflib_get_softc_ctx(ctx);
419 /* Save tunable values */
420 ixl_save_pf_tunables(pf);
422 /* Do PCI setup - map BAR0, etc */
423 if (ixl_allocate_pci_resources(pf)) {
424 device_printf(dev, "Allocation of PCI resources failed\n");
429 /* Establish a clean starting point */
431 status = i40e_pf_reset(hw);
433 device_printf(dev, "PF reset failure %s\n",
434 i40e_stat_str(hw, status));
439 /* Initialize the shared code */
440 status = i40e_init_shared_code(hw);
442 device_printf(dev, "Unable to initialize shared code, error %s\n",
443 i40e_stat_str(hw, status));
448 /* Set up the admin queue */
449 hw->aq.num_arq_entries = IXL_AQ_LEN;
450 hw->aq.num_asq_entries = IXL_AQ_LEN;
451 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
452 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
454 status = i40e_init_adminq(hw);
455 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
456 device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
457 i40e_stat_str(hw, status));
461 ixl_print_nvm_version(pf);
/* NVM newer than the driver's supported FW API: refuse to attach. */
463 if (status == I40E_ERR_FIRMWARE_API_VERSION) {
464 device_printf(dev, "The driver for the device stopped "
465 "because the NVM image is newer than expected.\n");
466 device_printf(dev, "You must install the most recent version of "
467 "the network driver.\n");
/* Minor-version mismatches only warn; attach continues. */
472 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
473 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
474 device_printf(dev, "The driver for the device detected "
475 "a newer version of the NVM image than expected.\n");
476 device_printf(dev, "Please install the most recent version "
477 "of the network driver.\n");
478 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
479 device_printf(dev, "The driver for the device detected "
480 "an older version of the NVM image than expected.\n");
481 device_printf(dev, "Please update the NVM image.\n");
485 i40e_clear_pxe_mode(hw);
487 /* Get capabilities from the device */
488 error = ixl_get_hw_capabilities(pf);
490 device_printf(dev, "get_hw_capabilities failed: %d\n",
495 /* Set up host memory cache */
496 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
497 hw->func_caps.num_rx_qp, 0, 0);
499 device_printf(dev, "init_lan_hmc failed: %s\n",
500 i40e_stat_str(hw, status));
503 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
505 device_printf(dev, "configure_lan_hmc failed: %s\n",
506 i40e_stat_str(hw, status));
510 /* Disable LLDP from the firmware for certain NVM versions */
511 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
512 (pf->hw.aq.fw_maj_ver < 4)) {
513 i40e_aq_stop_lldp(hw, TRUE, NULL);
514 pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
517 /* Get MAC addresses from hardware */
518 i40e_get_mac_addr(hw, hw->mac.addr);
519 error = i40e_validate_mac_addr(hw->mac.addr);
521 device_printf(dev, "validate_mac_addr failed: %d\n", error);
524 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
525 iflib_set_mac(ctx, hw->mac.addr);
526 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
528 /* Set up the device filtering */
529 bzero(&filter, sizeof(filter));
530 filter.enable_ethtype = TRUE;
531 filter.enable_macvlan = TRUE;
532 filter.enable_fdir = FALSE;
533 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
534 if (i40e_set_filter_control(hw, &filter))
535 device_printf(dev, "i40e_set_filter_control() failed\n");
537 /* Query device FW LLDP status */
538 ixl_get_fw_lldp_status(pf);
539 /* Tell FW to apply DCB config on link up */
540 i40e_aq_set_dcb_parameters(hw, true, NULL);
542 /* Fill out iflib parameters */
/* X722 supports 128 queue pairs per PF; XL710-class parts support 64. */
543 if (hw->mac.type == I40E_MAC_X722)
544 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
546 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
/* TX ring size/txrx ops depend on the head-writeback tunable; the
 * head-writeback ring carries an extra u32 for the HW-written head. */
547 if (vsi->enable_head_writeback) {
548 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
549 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
550 scctx->isc_txrx = &ixl_txrx_hwb;
552 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
553 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
554 scctx->isc_txrx = &ixl_txrx_dwb;
556 scctx->isc_txrx->ift_legacy_intr = ixl_intr;
557 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
558 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
559 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
560 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
561 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
562 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
563 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
564 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
565 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
566 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
568 INIT_DBG_DEV(dev, "end");
/* Error-unwind path: release in reverse order of acquisition. */
572 i40e_shutdown_lan_hmc(hw);
574 i40e_shutdown_adminq(hw);
576 ixl_free_pci_resources(pf);
/*
 * Second-stage iflib attach, after queue/interrupt negotiation: ifnet
 * setup, switch config, filter lists, PF queue manager, PHY interrupt
 * mask, sysctls/stats, initial link state, SR-IOV, and optional iWARP
 * attach.  NOTE(review): declarations (dev/pf/vsi/hw/error), error
 * returns, and closing braces are elided from this chunk.
 */
582 ixl_if_attach_post(if_ctx_t ctx)
589 enum i40e_status_code status;
591 INIT_DBG_DEV(dev, "begin");
593 dev = iflib_get_dev(ctx);
594 pf = iflib_get_softc(ctx);
596 vsi->ifp = iflib_get_ifp(ctx);
599 /* Save off determined number of queues for interface */
600 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
601 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
603 /* Setup OS network interface / ifnet */
604 if (ixl_setup_interface(dev, pf)) {
605 device_printf(dev, "interface setup failed!\n");
610 /* Determine link state */
611 if (ixl_attach_get_link_status(pf)) {
616 error = ixl_switch_config(pf);
618 device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
623 /* Add protocol filters to list */
624 ixl_init_filters(vsi);
626 /* Init queue allocation manager */
627 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
629 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
633 /* reserve a contiguous allocation for the PF's VSI */
634 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
635 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
637 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
641 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
642 pf->qtag.num_allocated, pf->qtag.num_active);
644 /* Limit PHY interrupts to link, autoneg, and modules failure */
645 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
648 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
649 " aq_err %s\n", i40e_stat_str(hw, status),
650 i40e_aq_str(hw, hw->aq.asq_last_status));
654 /* Get the bus configuration and set the shared code */
655 ixl_get_bus_info(pf);
657 /* Keep admin queue interrupts active while driver is loaded */
658 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
659 ixl_configure_intr0_msix(pf);
660 ixl_enable_intr0(hw);
663 /* Set initial advertised speed sysctl value */
664 ixl_set_initial_advertised_speeds(pf);
666 /* Initialize statistics & add sysctls */
667 ixl_add_device_sysctls(pf);
668 ixl_pf_reset_stats(pf);
669 ixl_update_stats_counters(pf);
670 ixl_add_hw_stats(pf);
/* Force a fresh link query, then push the result up to iflib. */
672 hw->phy.get_link_info = true;
673 i40e_get_link_status(hw, &pf->link_up);
674 ixl_update_link_status(pf);
677 ixl_initialize_sriov(pf);
/* iWARP attach only when the device advertises the capability, the
 * tunable is set, and MSI-X vectors were reserved for it. */
681 if (hw->func_caps.iwarp && ixl_enable_iwarp) {
682 pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
683 if (pf->iw_enabled) {
684 error = ixl_iw_pf_attach(pf);
687 "interfacing to iWARP driver failed: %d\n",
691 device_printf(dev, "iWARP ready\n");
693 device_printf(dev, "iWARP disabled on this device "
694 "(no MSI-X vectors)\n");
696 pf->iw_enabled = false;
697 device_printf(dev, "The device is not iWARP enabled\n");
701 INIT_DBG_DEV(dev, "end");
705 INIT_DEBUGOUT("end: error %d", error);
706 /* ixl_if_detach() is called on error from this */
/*
 * Teardown, inverse of attach: iWARP detach, media list, LAN HMC,
 * admin queue, queue manager, PCI resources, MAC filter list.
 * NOTE(review): the surrounding comment delimiters and several braces/
 * error lines are elided from this chunk.
 */
711 * XXX: iflib always ignores the return value of detach()
712 * -> This means that this isn't allowed to fail
715 ixl_if_detach(if_ctx_t ctx)
717 struct ixl_pf *pf = iflib_get_softc(ctx);
718 struct ixl_vsi *vsi = &pf->vsi;
719 struct i40e_hw *hw = &pf->hw;
720 device_t dev = pf->dev;
721 enum i40e_status_code status;
726 INIT_DBG_DEV(dev, "begin");
729 if (ixl_enable_iwarp && pf->iw_enabled) {
730 error = ixl_iw_pf_detach(pf);
731 if (error == EBUSY) {
732 device_printf(dev, "iwarp in use; stop it first.\n");
737 /* Remove all previously allocated media types */
738 ifmedia_removeall(vsi->media);
740 /* Shutdown LAN HMC */
741 if (hw->hmc.hmc_obj) {
742 status = i40e_shutdown_lan_hmc(hw);
745 "i40e_shutdown_lan_hmc() failed with status %s\n",
746 i40e_stat_str(hw, status));
749 /* Shutdown admin queue */
750 ixl_disable_intr0(hw);
751 status = i40e_shutdown_adminq(hw);
754 "i40e_shutdown_adminq() failed with status %s\n",
755 i40e_stat_str(hw, status));
757 ixl_pf_qmgr_destroy(&pf->qmgr);
758 ixl_free_pci_resources(pf);
759 ixl_free_mac_filters(vsi);
760 INIT_DBG_DEV(dev, "end");
/* System-shutdown hook; currently only logs — low-power entry is still TODO. */
765 ixl_if_shutdown(if_ctx_t ctx)
769 INIT_DEBUGOUT("ixl_if_shutdown: begin");
771 /* TODO: Call ixl_if_stop()? */
773 /* TODO: Then setup low power mode */
/* Suspend hook; currently only logs — mirrors ixl_if_shutdown's TODOs. */
779 ixl_if_suspend(if_ctx_t ctx)
783 INIT_DEBUGOUT("ixl_if_suspend: begin");
785 /* TODO: Call ixl_if_stop()? */
787 /* TODO: Then setup low power mode */
/*
 * Resume hook after D3->D0: re-initializes the interface if it was up.
 * NOTE(review): the body of the IFF_UP branch is elided from this chunk.
 */
793 ixl_if_resume(if_ctx_t ctx)
795 struct ifnet *ifp = iflib_get_ifp(ctx);
797 INIT_DEBUGOUT("ixl_if_resume: begin");
799 /* Read & clear wake-up registers */
801 /* Required after D3->D0 transition */
802 if (ifp->if_flags & IFF_UP)
/*
 * ifdi_init: (re)program the device for operation — recover a dead admin
 * queue, refresh the MAC address (LAA support), set up the VSI rings,
 * multicast filters, interrupt routing/ITR, TX completion indices,
 * default VSI, promiscuity, and optional iWARP init.  NOTE(review):
 * several statements and closing braces are elided from this chunk
 * (e.g. the declaration of `ret` and the tail of the LAA write call).
 */
809 ixl_if_init(if_ctx_t ctx)
811 struct ixl_pf *pf = iflib_get_softc(ctx);
812 struct ixl_vsi *vsi = &pf->vsi;
813 struct i40e_hw *hw = &pf->hw;
814 struct ifnet *ifp = iflib_get_ifp(ctx);
815 device_t dev = iflib_get_dev(ctx);
816 u8 tmpaddr[ETHER_ADDR_LEN];
820 * If the aq is dead here, it probably means something outside of the driver
821 * did something to the adapter, like a PF reset.
822 * So, rebuild the driver's state here if that occurs.
824 if (!i40e_check_asq_alive(&pf->hw)) {
825 device_printf(dev, "Admin Queue is down; resetting...\n");
826 ixl_teardown_hw_structs(pf);
827 ixl_rebuild_hw_structs_after_reset(pf);
830 /* Get the latest mac address... User might use a LAA */
831 bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
/* If the ifnet address differs from the programmed one and is valid,
 * swap the unicast filter and write the LAA to firmware. */
832 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
833 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
834 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
835 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
836 ret = i40e_aq_mac_address_write(hw,
837 I40E_AQC_WRITE_TYPE_LAA_ONLY,
840 device_printf(dev, "LLA address change failed!!\n");
843 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
846 iflib_set_mac(ctx, hw->mac.addr);
848 /* Prepare the VSI: rings, hmc contexts, etc... */
849 if (ixl_initialize_vsi(vsi)) {
850 device_printf(dev, "initialize vsi failed!!\n");
854 /* Reconfigure multicast filters in HW */
855 ixl_if_multi_set(ctx);
860 /* Set up MSI-X routing and the ITR settings */
861 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
862 ixl_configure_queue_intr_msix(pf);
863 ixl_configure_itr(pf);
865 ixl_configure_legacy(pf);
/* TX completion index init matches the selected writeback mode. */
867 if (vsi->enable_head_writeback)
868 ixl_init_tx_cidx(vsi);
870 ixl_init_tx_rsqs(vsi);
872 ixl_enable_rings(vsi);
874 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
876 /* Re-add configure filters to HW */
877 ixl_reconfigure_filters(vsi);
879 /* Configure promiscuous mode */
880 ixl_if_promisc_set(ctx, if_getflags(ifp));
883 if (ixl_enable_iwarp && pf->iw_enabled) {
884 ret = ixl_iw_pf_init(pf);
887 "initialize iwarp failed, code %d\n", ret);
/*
 * ifdi_stop: quiesce the interface — stop iWARP (call elided from this
 * chunk), then disable ring interrupts and the rings themselves.
 */
893 ixl_if_stop(if_ctx_t ctx)
895 struct ixl_pf *pf = iflib_get_softc(ctx);
896 struct ixl_vsi *vsi = &pf->vsi;
898 INIT_DEBUGOUT("ixl_if_stop: begin\n");
900 // TODO: This may need to be reworked
902 /* Stop iWARP device */
903 if (ixl_enable_iwarp && pf->iw_enabled)
907 ixl_disable_rings_intr(vsi);
908 ixl_disable_rings(pf, vsi, &pf->qtag);
/*
 * ifdi_msix_intr_assign: vector 0 goes to the admin queue; vectors 1..n
 * go to RX queues; TX queues piggyback on RX vectors round-robin.
 * On failure, frees whatever IRQs were already allocated.
 * NOTE(review): declarations (buf, some rid assignments), error checks,
 * and closing braces are elided from this chunk.
 */
912 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
914 struct ixl_pf *pf = iflib_get_softc(ctx);
915 struct ixl_vsi *vsi = &pf->vsi;
916 struct ixl_rx_queue *rx_que = vsi->rx_queues;
917 struct ixl_tx_queue *tx_que = vsi->tx_queues;
918 int err, i, rid, vector = 0;
921 MPASS(vsi->shared->isc_nrxqsets > 0);
922 MPASS(vsi->shared->isc_ntxqsets > 0);
924 /* Admin Que must use vector 0*/
926 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
927 ixl_msix_adminq, pf, 0, "aq");
929 iflib_irq_free(ctx, &vsi->irq);
930 device_printf(iflib_get_dev(ctx),
931 "Failed to register Admin Que handler");
934 /* Create soft IRQ for handling VFLRs */
935 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
937 /* Now set up the stations */
938 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
941 snprintf(buf, sizeof(buf), "rxq%d", i);
942 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
943 IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
944 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
945 * what's expected in the iflib context? */
947 device_printf(iflib_get_dev(ctx),
948 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
949 vsi->num_rx_queues = i + 1;
952 rx_que->msix = vector;
955 bzero(buf, sizeof(buf));
/* TX queues get soft IRQs bound to an RX vector, round-robin. */
957 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
958 snprintf(buf, sizeof(buf), "txq%d", i);
959 iflib_softirq_alloc_generic(ctx,
960 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
961 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
963 /* TODO: Maybe call a strategy function for this to figure out which
964 * interrupts to map Tx queues to. I don't know if there's an immediately
965 * better way than this other than a user-supplied map, though. */
966 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* Error-unwind: release the admin IRQ and all allocated RX queue IRQs. */
971 iflib_irq_free(ctx, &vsi->irq);
972 rx_que = vsi->rx_queues;
973 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
974 iflib_irq_free(ctx, &rx_que->que_irq);
/* ifdi_intr_enable: turn on the admin-queue interrupt and every RX
 * queue interrupt. */
979 * Enable all interrupts
982 * iflib_init_locked, after ixl_if_init()
985 ixl_if_enable_intr(if_ctx_t ctx)
987 struct ixl_pf *pf = iflib_get_softc(ctx);
988 struct ixl_vsi *vsi = &pf->vsi;
989 struct i40e_hw *hw = vsi->hw;
990 struct ixl_rx_queue *que = vsi->rx_queues;
992 ixl_enable_intr0(hw);
993 /* Enable queue interrupts */
994 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
995 /* TODO: Queue index parameter is probably wrong */
996 ixl_enable_queue(hw, que->rxr.me);
/* ifdi_intr_disable: mask queue interrupts only; the admin queue
 * interrupt stays enabled.  Under MSI-X, queue vector N uses HW queue
 * interrupt index msix-1; in legacy/MSI mode, unlinking PFINT_LNKLST0
 * (FIRSTQ_INDX = 0x7FF) detaches all queues from the interrupt. */
1000 * Disable queue interrupts
1002 * Other interrupt causes need to remain active.
1005 ixl_if_disable_intr(if_ctx_t ctx)
1007 struct ixl_pf *pf = iflib_get_softc(ctx);
1008 struct ixl_vsi *vsi = &pf->vsi;
1009 struct i40e_hw *hw = vsi->hw;
1010 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1012 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1013 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1014 ixl_disable_queue(hw, rx_que->msix - 1);
1016 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1017 // stops queues from triggering interrupts
1018 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/* ifdi_rx_queue_intr_enable: re-arm the interrupt for one RX queue
 * (HW queue interrupt index = MSI-X vector - 1). */
1023 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1025 struct ixl_pf *pf = iflib_get_softc(ctx);
1026 struct ixl_vsi *vsi = &pf->vsi;
1027 struct i40e_hw *hw = vsi->hw;
1028 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
1030 ixl_enable_queue(hw, rx_que->msix - 1);
/* ifdi_tx_queue_intr_enable: re-arm the interrupt vector that this TX
 * queue shares with an RX queue (see ixl_if_msix_intr_assign). */
1035 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1037 struct ixl_pf *pf = iflib_get_softc(ctx);
1038 struct ixl_vsi *vsi = &pf->vsi;
1039 struct i40e_hw *hw = vsi->hw;
1040 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1042 ixl_enable_queue(hw, tx_que->msix - 1);
/*
 * ifdi_tx_queues_alloc: allocate per-queue TX software state and bind it
 * to the iflib-provided descriptor ring memory.  In descriptor-writeback
 * mode each ring also gets a report-status (tx_rsq) array.  On failure,
 * ixl_if_queues_free() releases partial allocations.  NOTE(review):
 * error-return lines and closing braces are elided from this chunk.
 */
1047 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1049 struct ixl_pf *pf = iflib_get_softc(ctx);
1050 struct ixl_vsi *vsi = &pf->vsi;
1051 if_softc_ctx_t scctx = vsi->shared;
1052 struct ixl_tx_queue *que;
1053 int i, j, error = 0;
1055 MPASS(scctx->isc_ntxqsets > 0);
1057 MPASS(scctx->isc_ntxqsets == ntxqsets);
1059 /* Allocate queue structure memory */
1060 if (!(vsi->tx_queues =
1061 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1062 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1066 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1067 struct tx_ring *txr = &que->txr;
1072 if (!vsi->enable_head_writeback) {
1073 /* Allocate report status array */
1074 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1075 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1079 /* Init report status array */
1080 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1081 txr->tx_rsq[j] = QIDX_INVALID;
1083 /* get the virtual and physical address of the hardware queues */
1084 txr->tail = I40E_QTX_TAIL(txr->me);
1085 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1086 txr->tx_paddr = paddrs[i * ntxqs];
/* Error-unwind: release any queue state allocated so far. */
1092 ixl_if_queues_free(ctx);
/*
 * ifdi_rx_queues_alloc: allocate per-queue RX software state and bind it
 * to the iflib-provided descriptor ring memory.  Mirrors the TX variant.
 * NOTE(review): error-return lines and closing braces are elided from
 * this chunk.
 */
1097 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1099 struct ixl_pf *pf = iflib_get_softc(ctx);
1100 struct ixl_vsi *vsi = &pf->vsi;
1101 struct ixl_rx_queue *que;
1105 if_softc_ctx_t scctx = vsi->shared;
1106 MPASS(scctx->isc_nrxqsets > 0);
1108 MPASS(scctx->isc_nrxqsets == nrxqsets);
1111 /* Allocate queue structure memory */
1112 if (!(vsi->rx_queues =
1113 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1114 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1115 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1120 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1121 struct rx_ring *rxr = &que->rxr;
1126 /* get the virtual and physical address of the hardware queues */
1127 rxr->tail = I40E_QRX_TAIL(rxr->me);
1128 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1129 rxr->rx_paddr = paddrs[i * nrxqs];
/* Error-unwind: release any queue state allocated so far. */
1135 ixl_if_queues_free(ctx);
/*
 * iflib callback: free all TX/RX queue software state.
 *
 * Safe to call on a partially-allocated VSI (used as the error path of the
 * queue-alloc callbacks): every free is guarded by a NULL check and the
 * pointers are cleared afterwards to prevent double-free.
 */
1140 ixl_if_queues_free(if_ctx_t ctx)
1142 struct ixl_pf *pf = iflib_get_softc(ctx);
1143 struct ixl_vsi *vsi = &pf->vsi;
/* tx_rsq arrays only exist when head writeback is disabled; see
 * ixl_if_tx_queues_alloc(). */
1145 if (!vsi->enable_head_writeback) {
1146 struct ixl_tx_queue *que;
1149 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1150 struct tx_ring *txr = &que->txr;
1151 if (txr->tx_rsq != NULL) {
1152 free(txr->tx_rsq, M_IXL);
1158 if (vsi->tx_queues != NULL) {
1159 free(vsi->tx_queues, M_IXL);
1160 vsi->tx_queues = NULL;
1162 if (vsi->rx_queues != NULL) {
1163 free(vsi->rx_queues, M_IXL);
1164 vsi->rx_queues = NULL;
/*
 * Push the PF's current link state up to the network stack, but only on
 * an actual transition (tracked in vsi->link_active) so repeated calls
 * while the link is stable are cheap no-ops. Link changes are also
 * broadcast to the VFs via ixl_broadcast_link_state().
 */
1169 ixl_update_link_status(struct ixl_pf *pf)
1171 struct ixl_vsi *vsi = &pf->vsi;
1172 struct i40e_hw *hw = &pf->hw;
1176 if (vsi->link_active == FALSE) {
1177 vsi->link_active = TRUE;
/* Convert the AQ-reported link speed to a baudrate for iflib. */
1178 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1179 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1180 ixl_link_up_msg(pf);
1182 ixl_broadcast_link_state(pf);
1186 } else { /* Link down */
1187 if (vsi->link_active == TRUE) {
1188 vsi->link_active = FALSE;
1189 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1191 ixl_broadcast_link_state(pf);
/*
 * Log a LAN queue overflow admin-queue event: decode the global RX queue
 * index from param0 and the raw QTX_CTL register value from param1.
 * Diagnostic only — no recovery action is taken here.
 */
1198 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1200 device_t dev = pf->dev;
1201 u32 rxq_idx, qtx_ctl;
1203 rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1204 I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1205 qtx_ctl = e->desc.params.external.param1;
1207 device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1208 device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
/*
 * Drain the Admin Receive Queue: pop events one at a time with
 * i40e_clean_arq_element() and dispatch on the opcode, up to
 * IXL_ADM_LIMIT events per invocation. On return, *pending holds the
 * number of events still queued, and the adminq interrupt cause is
 * re-armed so further events will interrupt again.
 */
1212 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1214 enum i40e_status_code status = I40E_SUCCESS;
1215 struct i40e_arq_event_info event;
1216 struct i40e_hw *hw = &pf->hw;
1217 device_t dev = pf->dev;
/* Scratch buffer for the event payload; freed before return. */
1221 event.buf_len = IXL_AQ_BUF_SZ;
1222 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1223 if (!event.msg_buf) {
1224 device_printf(dev, "%s: Unable to allocate memory for Admin"
1225 " Queue event!\n", __func__);
1229 /* clean and process any events */
1231 status = i40e_clean_arq_element(hw, &event, pending);
1234 opcode = LE16_TO_CPU(event.desc.opcode);
1235 ixl_dbg(pf, IXL_DBG_AQ,
1236 "Admin Queue event: %#06x\n", opcode);
1238 case i40e_aqc_opc_get_link_status:
1239 ixl_link_event(pf, &event);
1241 case i40e_aqc_opc_send_msg_to_pf:
/* Mailbox message from a VF (SR-IOV path). */
1243 ixl_handle_vf_msg(pf, &event);
1247 * This should only occur on no-drop queues, which
1248 * aren't currently configured.
1250 case i40e_aqc_opc_event_lan_overflow:
1251 ixl_handle_lan_overflow_event(pf, &event);
/* Loop until the ARQ is empty or the per-call event limit is hit. */
1256 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1258 free(event.msg_buf, M_IXL);
1260 /* Re-enable admin queue interrupt cause */
1261 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1262 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1263 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * iflib admin task: handle deferred, non-fast-path work — pending EMP
 * resets and MDD (malicious driver detection) events, then the admin
 * queue, link state, and statistics.
 */
1269 ixl_if_update_admin_status(if_ctx_t ctx)
1271 struct ixl_pf *pf = iflib_get_softc(ctx);
1272 struct i40e_hw *hw = &pf->hw;
1275 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1276 ixl_handle_empr_reset(pf);
1278 if (pf->state & IXL_PF_STATE_MDD_PENDING)
1279 ixl_handle_mdd_event(pf);
1281 ixl_process_adminq(pf, &pending);
1282 ixl_update_link_status(pf);
1283 ixl_update_stats_counters(pf);
1286 * If there are still messages to process, reschedule ourselves.
1287 * Otherwise, re-enable our interrupt and go to sleep.
1290 iflib_admin_intr_deferred(ctx);
1292 ixl_enable_intr0(hw);
/*
 * iflib callback: sync the hardware multicast filter list with the
 * interface's current multicast membership. Stale filters are removed
 * first; if the address count saturates MAX_MULTICAST_ADDR the VSI is
 * switched to multicast-promiscuous instead of installing per-address
 * filters. vsi->num_macs tracks the installed filter count.
 */
1296 ixl_if_multi_set(if_ctx_t ctx)
1298 struct ixl_pf *pf = iflib_get_softc(ctx);
1299 struct ixl_vsi *vsi = &pf->vsi;
1300 struct i40e_hw *hw = vsi->hw;
1301 int mcnt = 0, flags;
1304 IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1306 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
1307 /* Delete filters for removed multicast addresses */
1308 del_mcnt = ixl_del_multi(vsi);
1309 vsi->num_macs -= del_mcnt;
/* Address list saturated: fall back to multicast promiscuous mode. */
1311 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1312 i40e_aq_set_vsi_multicast_promiscuous(hw,
1313 vsi->seid, TRUE, NULL);
1316 /* (re-)install filters for all mcast addresses */
1317 /* XXX: This bypasses filter count tracking code! */
1318 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
1321 vsi->num_macs += mcnt;
1322 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1323 ixl_add_hw_filters(vsi, flags, mcnt);
1326 ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
1327 __func__, vsi->num_macs);
1328 IOCTL_DEBUGOUT("ixl_if_multi_set: end");
/*
 * iflib callback: validate and apply a new MTU.
 *
 * Rejects MTUs whose resulting frame (MTU + Ethernet header + CRC +
 * VLAN tag) would exceed IXL_MAX_FRAME; otherwise records the new
 * maximum frame size in the shared iflib softc context.
 */
1332 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1334 struct ixl_pf *pf = iflib_get_softc(ctx);
1335 struct ixl_vsi *vsi = &pf->vsi;
1337 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1338 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1339 ETHER_VLAN_ENCAP_LEN)
1342 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1343 ETHER_VLAN_ENCAP_LEN;
/*
 * iflib callback: report current media status (SIOCGIFMEDIA).
 *
 * Translates the firmware-reported PHY type into the corresponding
 * ifmedia IFM_* subtype, and appends duplex and flow-control flags.
 * PHY types with no ifmedia equivalent report IFM_OTHER/IFM_UNKNOWN.
 */
1349 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1351 struct ixl_pf *pf = iflib_get_softc(ctx);
1352 struct i40e_hw *hw = &pf->hw;
1354 INIT_DEBUGOUT("ixl_media_status: begin");
1356 ifmr->ifm_status = IFM_AVALID;
1357 ifmr->ifm_active = IFM_ETHER;
1363 ifmr->ifm_status |= IFM_ACTIVE;
1364 /* Hardware is always full-duplex */
1365 ifmr->ifm_active |= IFM_FDX;
/* Map the firmware PHY type to an ifmedia subtype. */
1367 switch (hw->phy.link_info.phy_type) {
1369 case I40E_PHY_TYPE_100BASE_TX:
1370 ifmr->ifm_active |= IFM_100_TX;
1373 case I40E_PHY_TYPE_1000BASE_T:
1374 ifmr->ifm_active |= IFM_1000_T;
1376 case I40E_PHY_TYPE_1000BASE_SX:
1377 ifmr->ifm_active |= IFM_1000_SX;
1379 case I40E_PHY_TYPE_1000BASE_LX:
1380 ifmr->ifm_active |= IFM_1000_LX;
1382 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1383 ifmr->ifm_active |= IFM_1000_T;
1386 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1387 ifmr->ifm_active |= IFM_10G_TWINAX;
1389 case I40E_PHY_TYPE_10GBASE_SR:
1390 ifmr->ifm_active |= IFM_10G_SR;
1392 case I40E_PHY_TYPE_10GBASE_LR:
1393 ifmr->ifm_active |= IFM_10G_LR;
1395 case I40E_PHY_TYPE_10GBASE_T:
1396 ifmr->ifm_active |= IFM_10G_T;
1398 case I40E_PHY_TYPE_XAUI:
1399 case I40E_PHY_TYPE_XFI:
1400 ifmr->ifm_active |= IFM_10G_TWINAX;
1402 case I40E_PHY_TYPE_10GBASE_AOC:
1403 ifmr->ifm_active |= IFM_10G_AOC;
1406 case I40E_PHY_TYPE_25GBASE_KR:
1407 ifmr->ifm_active |= IFM_25G_KR;
1409 case I40E_PHY_TYPE_25GBASE_CR:
1410 ifmr->ifm_active |= IFM_25G_CR;
1412 case I40E_PHY_TYPE_25GBASE_SR:
1413 ifmr->ifm_active |= IFM_25G_SR;
1415 case I40E_PHY_TYPE_25GBASE_LR:
1416 ifmr->ifm_active |= IFM_25G_LR;
1418 case I40E_PHY_TYPE_25GBASE_AOC:
1419 ifmr->ifm_active |= IFM_25G_AOC;
1421 case I40E_PHY_TYPE_25GBASE_ACC:
1422 ifmr->ifm_active |= IFM_25G_ACC;
1425 case I40E_PHY_TYPE_40GBASE_CR4:
1426 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1427 ifmr->ifm_active |= IFM_40G_CR4;
1429 case I40E_PHY_TYPE_40GBASE_SR4:
1430 ifmr->ifm_active |= IFM_40G_SR4;
1432 case I40E_PHY_TYPE_40GBASE_LR4:
1433 ifmr->ifm_active |= IFM_40G_LR4;
1435 case I40E_PHY_TYPE_XLAUI:
1436 ifmr->ifm_active |= IFM_OTHER;
1438 case I40E_PHY_TYPE_1000BASE_KX:
1439 ifmr->ifm_active |= IFM_1000_KX;
1441 case I40E_PHY_TYPE_SGMII:
1442 ifmr->ifm_active |= IFM_1000_SGMII;
1444 /* ERJ: What's the difference between these? */
1445 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1446 case I40E_PHY_TYPE_10GBASE_CR1:
1447 ifmr->ifm_active |= IFM_10G_CR1;
1449 case I40E_PHY_TYPE_10GBASE_KX4:
1450 ifmr->ifm_active |= IFM_10G_KX4;
1452 case I40E_PHY_TYPE_10GBASE_KR:
1453 ifmr->ifm_active |= IFM_10G_KR;
1455 case I40E_PHY_TYPE_SFI:
1456 ifmr->ifm_active |= IFM_10G_SFI;
1458 /* Our single 20G media type */
1459 case I40E_PHY_TYPE_20GBASE_KR2:
1460 ifmr->ifm_active |= IFM_20G_KR2;
1462 case I40E_PHY_TYPE_40GBASE_KR4:
1463 ifmr->ifm_active |= IFM_40G_KR4;
1465 case I40E_PHY_TYPE_XLPPI:
1466 case I40E_PHY_TYPE_40GBASE_AOC:
1467 ifmr->ifm_active |= IFM_40G_XLPPI;
1469 /* Unknown to driver */
1471 ifmr->ifm_active |= IFM_UNKNOWN;
1474 /* Report flow control status as well */
1475 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1476 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1477 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1478 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * iflib callback: media change request (SIOCSIFMEDIA).
 *
 * Only validates that the requested media is Ethernet; the driver does
 * not support changing media, so it just logs that fact.
 */
1482 ixl_if_media_change(if_ctx_t ctx)
1484 struct ifmedia *ifm = iflib_get_media(ctx);
1486 INIT_DEBUGOUT("ixl_media_change: begin");
1488 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1491 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * iflib callback: program unicast/multicast promiscuous mode on the VSI.
 *
 * IFF_PROMISC enables both; IFF_ALLMULTI — or a saturated multicast
 * address list — enables multicast promiscuous only. Both AQ commands
 * are issued so that clearing a previously-set mode also takes effect.
 */
1496 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1498 struct ixl_pf *pf = iflib_get_softc(ctx);
1499 struct ixl_vsi *vsi = &pf->vsi;
1500 struct ifnet *ifp = iflib_get_ifp(ctx);
1501 struct i40e_hw *hw = vsi->hw;
1503 bool uni = FALSE, multi = FALSE;
1505 if (flags & IFF_PROMISC)
1507 else if (flags & IFF_ALLMULTI ||
1508 if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
1511 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1512 vsi->seid, uni, NULL, true);
1515 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1516 vsi->seid, multi, NULL);
/*
 * iflib callback: periodic per-queue timer. All deferred work is done in
 * the admin task, so this only schedules it.
 */
1521 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1526 /* Fire off the adminq task */
1527 iflib_admin_intr_deferred(ctx);
/*
 * iflib callback: a VLAN has been registered on the interface.
 * Installs a MAC+VLAN filter for the primary MAC with the new tag.
 */
1531 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1533 struct ixl_pf *pf = iflib_get_softc(ctx);
1534 struct ixl_vsi *vsi = &pf->vsi;
1535 struct i40e_hw *hw = vsi->hw;
/* VLAN IDs are 1..4095; 0 and out-of-range tags are ignored. */
1537 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1541 ixl_add_filter(vsi, hw->mac.addr, vtag);
/*
 * iflib callback: a VLAN has been unregistered from the interface.
 * Removes the corresponding MAC+VLAN filter (inverse of
 * ixl_if_vlan_register()).
 */
1545 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1547 struct ixl_pf *pf = iflib_get_softc(ctx);
1548 struct ixl_vsi *vsi = &pf->vsi;
1549 struct i40e_hw *hw = vsi->hw;
/* VLAN IDs are 1..4095; 0 and out-of-range tags are ignored. */
1551 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1555 ixl_del_filter(vsi, hw->mac.addr, vtag);
/*
 * iflib callback: return an interface statistics counter.
 *
 * Counters are read from the VSI's cached statistics; anything not
 * tracked per-VSI falls through to the stack's default accounting.
 */
1559 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1561 struct ixl_pf *pf = iflib_get_softc(ctx);
1562 struct ixl_vsi *vsi = &pf->vsi;
1563 if_t ifp = iflib_get_ifp(ctx);
1566 case IFCOUNTER_IPACKETS:
1567 return (vsi->ipackets);
1568 case IFCOUNTER_IERRORS:
1569 return (vsi->ierrors);
1570 case IFCOUNTER_OPACKETS:
1571 return (vsi->opackets);
1572 case IFCOUNTER_OERRORS:
1573 return (vsi->oerrors);
1574 case IFCOUNTER_COLLISIONS:
1575 /* Collisions are by standard impossible in 40G/10G Ethernet */
1577 case IFCOUNTER_IBYTES:
1578 return (vsi->ibytes);
1579 case IFCOUNTER_OBYTES:
1580 return (vsi->obytes);
1581 case IFCOUNTER_IMCASTS:
1582 return (vsi->imcasts);
1583 case IFCOUNTER_OMCASTS:
1584 return (vsi->omcasts);
1585 case IFCOUNTER_IQDROPS:
1586 return (vsi->iqdrops);
1587 case IFCOUNTER_OQDROPS:
1588 return (vsi->oqdrops);
1589 case IFCOUNTER_NOPROTO:
1590 return (vsi->noproto);
1592 return (if_get_counter_default(ifp, cnt));
/*
 * iflib callback: handle a VF Function Level Reset (SR-IOV); delegates
 * entirely to ixl_handle_vflr().
 */
1598 ixl_if_vflr_handle(if_ctx_t ctx)
1600 struct ixl_pf *pf = iflib_get_softc(ctx);
1602 ixl_handle_vflr(pf);
/*
 * iflib callback: service an I2C read request (e.g. SFP module EEPROM).
 *
 * Reads req->len bytes one at a time via the PF's read_i2c_byte method;
 * bails out if no access method was configured or any byte read fails.
 */
1607 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1609 struct ixl_pf *pf = iflib_get_softc(ctx);
1611 if (pf->read_i2c_byte == NULL)
1614 for (int i = 0; i < req->len; i++)
1615 if (pf->read_i2c_byte(pf, req->offset + i,
1616 req->dev_addr, &req->data[i]))
/*
 * iflib callback: driver-private ioctl (SIOCGDRVSPEC/SIOCSDRVSPEC).
 * Currently only the I40E_NVM_ACCESS command (NVM update tool) is
 * recognized.
 */
1622 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1624 struct ixl_pf *pf = iflib_get_softc(ctx);
1625 struct ifdrv *ifd = (struct ifdrv *)data;
1628 /* NVM update command */
1629 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
1630 error = ixl_handle_nvmupd_cmd(pf, ifd);
/*
 * if_multi_apply() helper: add one multicast address to the VSI's filter
 * list. arg is the VSI; non-link-layer entries are skipped.
 */
1638 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
1640 struct ixl_vsi *vsi = arg;
1642 if (ifma->ifma_addr->sa_family != AF_LINK)
1644 ixl_add_mc_filter(vsi,
1645 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1650 * Sanity check and save off tunable values.
1653 ixl_save_pf_tunables(struct ixl_pf *pf)
1655 device_t dev = pf->dev;
1657 /* Save tunable information */
1658 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1659 pf->dbg_mask = ixl_core_debug_mask;
1660 pf->hw.debug_mask = ixl_shared_debug_mask;
/* Double-negate to normalize the loader tunables to 0/1. */
1661 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1662 pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1664 pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1665 pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
/* Out-of-range I2C access method selections fall back to the default. */
1668 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1669 pf->i2c_access_method = 0;
1671 pf->i2c_access_method = ixl_i2c_access_method;
/* Clamp the TX interrupt throttle rate to [0, IXL_MAX_ITR], warning
 * and substituting the default (IXL_ITR_4K) on invalid input. */
1673 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1674 device_printf(dev, "Invalid tx_itr value of %d set!\n",
1676 device_printf(dev, "tx_itr must be between %d and %d, "
1679 device_printf(dev, "Using default value of %d instead\n",
1681 pf->tx_itr = IXL_ITR_4K;
1683 pf->tx_itr = ixl_tx_itr;
/* Same validation for the RX interrupt throttle rate (default
 * IXL_ITR_8K). */
1685 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1686 device_printf(dev, "Invalid rx_itr value of %d set!\n",
1688 device_printf(dev, "rx_itr must be between %d and %d, "
1691 device_printf(dev, "Using default value of %d instead\n",
1693 pf->rx_itr = IXL_ITR_8K;
1695 pf->rx_itr = ixl_rx_itr;